Dec 11 10:34:42 crc systemd[1]: Starting Kubernetes Kubelet...
Dec 11 10:34:42 crc restorecon[4694]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 11 10:34:42 crc restorecon[4694]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 10:34:42 crc 
restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 11 10:34:42 crc 
restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 
10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 11 10:34:42 crc 
restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 
10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:42 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 
10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc 
restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 11 10:34:43 crc restorecon[4694]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 11 10:34:43 crc restorecon[4694]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Dec 11 10:34:43 crc kubenswrapper[5016]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Dec 11 10:34:43 crc kubenswrapper[5016]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Dec 11 10:34:43 crc kubenswrapper[5016]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Dec 11 10:34:43 crc kubenswrapper[5016]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Dec 11 10:34:43 crc kubenswrapper[5016]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Dec 11 10:34:43 crc kubenswrapper[5016]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.317583 5016 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324669 5016 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324702 5016 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324708 5016 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324713 5016 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324719 5016 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324725 5016 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324729 5016 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324733 5016 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324738 5016 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324742 5016 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324746 5016 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324751 5016 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324758 5016 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324766 5016 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324772 5016 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324777 5016 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324781 5016 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324785 5016 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324789 5016 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324794 5016 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324799 5016 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324803 5016 feature_gate.go:330] unrecognized feature gate: Example
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324806 5016 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324810 5016 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324814 5016 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324818 5016 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324825 5016 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324829 5016 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324832 5016 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324836 5016 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324840 5016 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324844 5016 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324847 5016 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324851 5016 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324855 5016 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324858 5016 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324861 5016 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324865 5016 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324868 5016 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324872 5016 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324875 5016 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324879 5016 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324882 5016 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324886 5016 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324889 5016 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324894 5016 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324898 5016 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324902 5016 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324905 5016 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324908 5016 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324912 5016 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324915 5016 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324919 5016 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324922 5016 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324926 5016 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324929 5016 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324932 5016 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324954 5016 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324960 5016 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324965 5016 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324968 5016 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324973 5016 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324977 5016 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324981 5016 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324987 5016 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324991 5016 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324994 5016 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.324998 5016 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.325002 5016 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.325005 5016 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.325009 5016 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325245 5016 flags.go:64] FLAG: --address="0.0.0.0"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325261 5016 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325268 5016 flags.go:64] FLAG: --anonymous-auth="true"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325274 5016 flags.go:64] FLAG: --application-metrics-count-limit="100"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325280 5016 flags.go:64] FLAG: --authentication-token-webhook="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325284 5016 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325290 5016 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325295 5016 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325299 5016 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325304 5016 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325308 5016 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325314 5016 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325318 5016 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325323 5016 flags.go:64] FLAG: --cgroup-root=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325328 5016 flags.go:64] FLAG: --cgroups-per-qos="true"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325336 5016 flags.go:64] FLAG: --client-ca-file=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325345 5016 flags.go:64] FLAG: --cloud-config=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325350 5016 flags.go:64] FLAG: --cloud-provider=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325355 5016 flags.go:64] FLAG: --cluster-dns="[]"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325364 5016 flags.go:64] FLAG: --cluster-domain=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325369 5016 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325374 5016 flags.go:64] FLAG: --config-dir=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325380 5016 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325385 5016 flags.go:64] FLAG: --container-log-max-files="5"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325393 5016 flags.go:64] FLAG: --container-log-max-size="10Mi"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325398 5016 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325403 5016 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325408 5016 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325414 5016 flags.go:64] FLAG: --contention-profiling="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325419 5016 flags.go:64] FLAG: --cpu-cfs-quota="true"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325424 5016 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325429 5016 flags.go:64] FLAG: --cpu-manager-policy="none"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325433 5016 flags.go:64] FLAG: --cpu-manager-policy-options=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325439 5016 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325443 5016 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325447 5016 flags.go:64] FLAG: --enable-debugging-handlers="true"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325451 5016 flags.go:64] FLAG: --enable-load-reader="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325455 5016 flags.go:64] FLAG: --enable-server="true"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325459 5016 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325464 5016 flags.go:64] FLAG: --event-burst="100"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325469 5016 flags.go:64] FLAG: --event-qps="50"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325473 5016 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325477 5016 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325481 5016 flags.go:64] FLAG: --eviction-hard=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325487 5016 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325491 5016 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325495 5016 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325499 5016 flags.go:64] FLAG: --eviction-soft=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325504 5016 flags.go:64] FLAG: --eviction-soft-grace-period=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325508 5016 flags.go:64] FLAG: --exit-on-lock-contention="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325512 5016 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325516 5016 flags.go:64] FLAG: --experimental-mounter-path=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325519 5016 flags.go:64] FLAG: --fail-cgroupv1="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325523 5016 flags.go:64] FLAG: --fail-swap-on="true"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325527 5016 flags.go:64] FLAG: --feature-gates=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325532 5016 flags.go:64] FLAG: --file-check-frequency="20s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325536 5016 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325540 5016 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325544 5016 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325550 5016 flags.go:64] FLAG: --healthz-port="10248"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325554 5016 flags.go:64] FLAG: --help="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325558 5016 flags.go:64] FLAG: --hostname-override=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325562 5016 flags.go:64] FLAG: --housekeeping-interval="10s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325566 5016 flags.go:64] FLAG: --http-check-frequency="20s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325570 5016 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325575 5016 flags.go:64] FLAG: --image-credential-provider-config=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325578 5016 flags.go:64] FLAG: --image-gc-high-threshold="85"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325583 5016 flags.go:64] FLAG: --image-gc-low-threshold="80"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325587 5016 flags.go:64] FLAG: --image-service-endpoint=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325591 5016 flags.go:64] FLAG: --kernel-memcg-notification="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325595 5016 flags.go:64] FLAG: --kube-api-burst="100"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325599 5016 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325604 5016 flags.go:64] FLAG: --kube-api-qps="50"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325608 5016 flags.go:64] FLAG: --kube-reserved=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325612 5016 flags.go:64] FLAG: --kube-reserved-cgroup=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325617 5016 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325621 5016 flags.go:64] FLAG: --kubelet-cgroups=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325625 5016 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325629 5016 flags.go:64] FLAG: --lock-file=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325633 5016 flags.go:64] FLAG: --log-cadvisor-usage="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325638 5016 flags.go:64] FLAG: --log-flush-frequency="5s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325642 5016 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325655 5016 flags.go:64] FLAG: --log-json-split-stream="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325660 5016 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325664 5016 flags.go:64] FLAG: --log-text-split-stream="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325668 5016 flags.go:64] FLAG: --logging-format="text"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325673 5016 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325678 5016 flags.go:64] FLAG: --make-iptables-util-chains="true"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325682 5016 flags.go:64] FLAG: --manifest-url=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325687 5016 flags.go:64] FLAG: --manifest-url-header=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325692 5016 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325697 5016 flags.go:64] FLAG: --max-open-files="1000000"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325702 5016 flags.go:64] FLAG: --max-pods="110"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325706 5016 flags.go:64] FLAG: --maximum-dead-containers="-1"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325710 5016 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325714 5016 flags.go:64] FLAG: --memory-manager-policy="None"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325718 5016 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325723 5016 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325726 5016 flags.go:64] FLAG: --node-ip="192.168.126.11"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325730 5016 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325740 5016 flags.go:64] FLAG: --node-status-max-images="50"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325744 5016 flags.go:64] FLAG: --node-status-update-frequency="10s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325748 5016 flags.go:64] FLAG: --oom-score-adj="-999"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325752 5016 flags.go:64] FLAG: --pod-cidr=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325756 5016 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325762 5016 flags.go:64] FLAG: --pod-manifest-path=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325766 5016 flags.go:64] FLAG: --pod-max-pids="-1"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325770 5016 flags.go:64] FLAG: --pods-per-core="0"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325774 5016 flags.go:64] FLAG: --port="10250"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325778 5016 flags.go:64] FLAG: --protect-kernel-defaults="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325782 5016 flags.go:64] FLAG: --provider-id=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325786 5016 flags.go:64] FLAG: --qos-reserved=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325790 5016 flags.go:64] FLAG: --read-only-port="10255"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325795 5016 flags.go:64] FLAG: --register-node="true"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325799 5016 flags.go:64] FLAG: --register-schedulable="true"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325802 5016 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325809 5016 flags.go:64] FLAG: --registry-burst="10"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325814 5016 flags.go:64] FLAG: --registry-qps="5"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325818 5016 flags.go:64] FLAG: --reserved-cpus=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325828 5016 flags.go:64] FLAG: --reserved-memory=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325833 5016 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325838 5016 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325843 5016 flags.go:64] FLAG: --rotate-certificates="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325852 5016 flags.go:64] FLAG: --rotate-server-certificates="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325861 5016 flags.go:64] FLAG: --runonce="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325867 5016 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325872 5016 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325878 5016 flags.go:64] FLAG: --seccomp-default="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325883 5016 flags.go:64] FLAG: --serialize-image-pulls="true"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325889 5016 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325894 5016 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325899 5016 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325904 5016 flags.go:64] FLAG: --storage-driver-password="root"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325909 5016 flags.go:64] FLAG: --storage-driver-secure="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325914 5016 flags.go:64] FLAG: --storage-driver-table="stats"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325919 5016 flags.go:64] FLAG: --storage-driver-user="root"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325924 5016 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325930 5016 flags.go:64] FLAG: --sync-frequency="1m0s"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325956 5016 flags.go:64] FLAG: --system-cgroups=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325962 5016 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325969 5016 flags.go:64] FLAG: --system-reserved-cgroup=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325973 5016 flags.go:64] FLAG: --tls-cert-file=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325977 5016 flags.go:64] FLAG: --tls-cipher-suites="[]"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325989 5016 flags.go:64] FLAG: --tls-min-version=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325993 5016 flags.go:64] FLAG: --tls-private-key-file=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.325997 5016 flags.go:64] FLAG: --topology-manager-policy="none"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.326002 5016 flags.go:64] FLAG: --topology-manager-policy-options=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.326007 5016 flags.go:64] FLAG: --topology-manager-scope="container"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.326012 5016 flags.go:64] FLAG: --v="2"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.326022 5016 flags.go:64] FLAG: --version="false"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.326029 5016 flags.go:64] FLAG: --vmodule=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.326036 5016 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.326041 5016 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326147 5016 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326157 5016 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326162 5016 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326166 5016 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326170 5016 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326174 5016 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326177 5016 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326181 5016 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326185 5016 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326188 5016 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326192 5016 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326196 5016 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326200 5016 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326203 5016 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326207 5016 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326210 5016 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326216 5016 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326220 5016 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326223 5016 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326226 5016 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326230 5016 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326234 5016 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326237 5016 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326240 5016 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326244 5016 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326247 5016 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326251 5016 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326254 5016 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326258 5016 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326261 5016 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326265 5016 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326270 5016 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326275 5016 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326279 5016 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326283 5016 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326288 5016 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326292 5016 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326296 5016 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326302 5016 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326306 5016 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326309 5016 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326314 5016 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326319 5016 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326323 5016 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326327 5016 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326331 5016 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326335 5016 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326339 5016 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326344 5016 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326348 5016 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326353 5016 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326357 5016 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326361 5016 feature_gate.go:330] unrecognized feature gate: Example
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326365 5016 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326369 5016 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326372 5016 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326376 5016 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326379 5016 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326382 5016 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326386 5016 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326389 5016 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326393 5016 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326396 5016 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326400 5016 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326403 5016 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326407 5016 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326410 5016 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326413 5016 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326417 5016 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326420 5016 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.326424 5016 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.326589 5016 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.332932 5016 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.332971 5016 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333025 5016 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333031 5016 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333036 5016 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333039 5016 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333043 5016 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333047 5016 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333050 5016 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333054 5016 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333057 5016 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333061 5016 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333064 5016 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333068 5016 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333071 5016 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333075 5016 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333079 5016 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333082 5016 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333085 5016 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333089 5016 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333092 5016 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333096 5016 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333099 5016 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333102 5016 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333107 5016 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333112 5016 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333117 5016 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333121 5016 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333124 5016 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333128 5016 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333132 5016 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333136 5016 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333140 5016 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333145 5016 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333148 5016 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333152 5016 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333156 5016 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333160 5016 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333164 5016 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333168 5016 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333173 5016 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333177 5016 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333181 5016 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333185 5016 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333189 5016 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333192 5016 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333196 5016 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333199 5016 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333203 5016 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333206 5016 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333210 5016 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333213 5016 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333217 5016 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333220 5016 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333225 5016 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333229 5016 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333233 5016 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333236 5016 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333239 5016 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333243 5016 feature_gate.go:330] unrecognized feature gate: Example
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333246 5016 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333251 5016 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333255 5016 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333259 5016 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333262 5016 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333266 5016 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333270 5016 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333274 5016 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333277 5016 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333280 5016 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333284 5016 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333287 5016 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333291 5016 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.333297 5016 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333416 5016 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333421 5016 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333426 5016 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333430 5016 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333434 5016 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333437 5016 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333443 5016 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333448 5016 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333452 5016 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333456 5016 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333460 5016 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333463 5016 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333467 5016 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333470 5016 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333474 5016 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333477 5016 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333480 5016 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333484 5016 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333487 5016 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333491 5016 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333494 5016 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333499 5016 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333503 5016 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333507 5016 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333511 5016 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333514 5016 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333518 5016 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333521 5016 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333525 5016 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333528 5016 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333532 5016 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333535 5016 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333538 5016 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333542 5016 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333546 5016 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333550 5016 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333554 5016 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333558 5016 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333561 5016 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333564 5016 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333568 5016 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333571 5016 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333575 5016 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333578 5016 feature_gate.go:330] unrecognized feature gate: Example
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333582 5016 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333587 5016 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333592 5016 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333596 5016 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333599 5016 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333603 5016 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333607 5016 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333610 5016 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333615 5016 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333618 5016 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333621 5016 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333625 5016 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333629 5016 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333633 5016 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333636 5016 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333640 5016 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333643 5016 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333648 5016 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333652 5016 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333655 5016 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333658 5016 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333662 5016 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333665 5016 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333669 5016 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333672 5016 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333676 5016 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.333680 5016 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.333685 5016 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.333821 5016 server.go:940] "Client rotation is on, will bootstrap in background"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.336280 5016 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.336362 5016 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.336810 5016 server.go:997] "Starting client certificate rotation"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.336829 5016 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.337123 5016 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-14 04:18:46.813645766 +0000 UTC
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.337190 5016 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.344392 5016 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.345528 5016 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.345785 5016 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.352269 5016 log.go:25] "Validated CRI v1 runtime API"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.370456 5016 log.go:25] "Validated CRI v1 image API"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.372074 5016 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.374069 5016 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-12-11-10-30-00-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.374113 5016 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.398789 5016 manager.go:217] Machine: {Timestamp:2025-12-11 10:34:43.397677179 +0000 UTC m=+0.216236768 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654116352 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:e1044399-4f18-4ebb-9d7a-d6302ff4a7fe BootID:a4f30830-8bfd-48d5-bda8-8a5b9692bf6a Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108168 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827056128 Type:vfs Inodes:4108168 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827060224 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:0e:53:52 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:0e:53:52 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:61:03:f0 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:d2:eb:1e Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:30:cb:4b Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:c7:f3:f1 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:42:1d:cc:5b:c9:48 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:3a:50:29:25:dd:f5 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654116352 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.399022 5016 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.399160 5016 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.399609 5016 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.399762 5016 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.399792 5016 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.400077 5016 topology_manager.go:138] "Creating topology manager with none policy"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.400086 5016 container_manager_linux.go:303] "Creating device plugin manager"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.400261 5016 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.400282 5016 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.400453 5016 state_mem.go:36] "Initialized new in-memory state store"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.400757 5016 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.413894 5016 kubelet.go:418] "Attempting to sync node with API server"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.413920 5016 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.413964 5016 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.413980 5016 kubelet.go:324] "Adding apiserver pod source"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.413993 5016 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.415419 5016 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.415490 5016 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.415563 5016 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.415606 5016 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.415658 5016 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.416017 5016 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417003 5016 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417488 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417508 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417514 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417520 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417531 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417538 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417545 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417555 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417563 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417570 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417581 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417606 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.417748 5016 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.418128 5016 server.go:1280] "Started kubelet"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.420047 5016 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Dec 11 10:34:43 crc systemd[1]: Started Kubernetes Kubelet.
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.418541 5016 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.418541 5016 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.421357 5016 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.421931 5016 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.421984 5016 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.422095 5016 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 22:05:05.235864657 +0000 UTC
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.422131 5016 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 851h30m21.813735689s for next certificate rotation
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.424138 5016 volume_manager.go:287] "The desired_state_of_world populator starts"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.424280 5016 volume_manager.go:289] "Starting Kubelet Volume Manager"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.424563 5016 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.425612 5016 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.426334 5016 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.426405 5016 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError"
Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.426445 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="200ms"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.426647 5016 factory.go:55] Registering systemd factory
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.426667 5016 factory.go:221] Registration of the systemd container factory successfully
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.426887 5016 factory.go:153] Registering CRI-O factory
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.426904 5016 factory.go:221] Registration of the crio container factory successfully
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.426976 5016 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.427012 5016 factory.go:103] Registering Raw factory
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.427026 5016 manager.go:1196] Started watching for new ooms in manager
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.427660 5016 manager.go:319] Starting recovery of all containers
Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.427774 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188022c4ff1a9d06 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:34:43.418103046 +0000 UTC m=+0.236662625,LastTimestamp:2025-12-11 10:34:43.418103046 +0000 UTC m=+0.236662625,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.432304 5016 server.go:460] "Adding debug handlers to kubelet server"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437619 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437677 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437691 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437706 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437722 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437736 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437750 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437763 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437778 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437792 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437806 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437818 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437856 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437897 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437911 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437924 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437958 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437972 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437986 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.437998 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438009 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438024 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438037 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438049 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438062 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438074 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438087 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438102 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438116 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438130 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438144 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438156 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438170 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438183 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438196 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438209 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438222 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438236 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438250 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438263 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438277 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438289 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438302 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438316 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438329 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438343 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438356 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438370 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438385 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438399 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438414 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438428 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438448 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438463 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438479 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438494 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438512 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438526 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.438539 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439371 5016 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439398 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439414 5016 reconstruct.go:130] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439427 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439443 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439458 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439470 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439482 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439496 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439512 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439526 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439539 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439552 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439563 5016 reconstruct.go:130] "Volume is marked as 
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439576 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439589 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439601 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439615 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439630 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439644 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439657 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439669 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439678 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439689 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439698 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439708 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439719 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439730 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439741 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439750 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439764 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439783 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439796 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439807 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439819 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439829 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439839 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439852 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439864 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439874 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439886 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439896 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439909 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439920 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.439931 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440008 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440029 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440042 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440055 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440067 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440121 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440134 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440146 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440162 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440175 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440187 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440198 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440209 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440221 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440232 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440242 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440253 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440263 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440274 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440288 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440299 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440310 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440321 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440333 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440347 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440358 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440371 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440384 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440396 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440408 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440421 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440434 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440446 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440468 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440482 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440495 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440508 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440522 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440535 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440551 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440565 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440579 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440593 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440607 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440622 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440635 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" 
volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440649 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440662 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440676 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440703 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440715 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440728 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440741 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440754 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440766 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440779 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440793 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440804 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440816 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440828 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440841 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440854 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440867 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440879 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440891 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440904 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440917 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440931 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" 
volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440973 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.440989 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441002 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441015 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441031 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441044 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441058 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441071 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441083 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441097 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441109 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" 
volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441122 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441133 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441146 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441158 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441169 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441181 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441193 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441207 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441220 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441234 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441248 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" 
volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441261 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441275 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441286 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441299 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441312 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441335 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441350 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441362 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441374 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441386 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441397 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441408 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441420 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441432 5016 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441449 5016 reconstruct.go:97] "Volume reconstruction finished" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.441457 5016 reconciler.go:26] "Reconciler: start to sync state" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.446421 5016 manager.go:324] Recovery completed Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.458764 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.460119 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.460152 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.460189 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.460899 5016 cpu_manager.go:225] "Starting CPU manager" policy="none" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.460919 5016 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.460954 5016 state_mem.go:36] "Initialized new in-memory state store" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.471517 5016 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.473174 5016 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.473223 5016 status_manager.go:217] "Starting to sync pod status with apiserver" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.473247 5016 kubelet.go:2335] "Starting kubelet main sync loop" Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.473288 5016 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Dec 11 10:34:43 crc kubenswrapper[5016]: W1211 10:34:43.474275 5016 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.474351 5016 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.476401 5016 policy_none.go:49] "None policy: Start" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.477255 5016 memory_manager.go:170] "Starting memorymanager" policy="None" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.477286 5016 state_mem.go:35] "Initializing new in-memory state store" Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.526614 5016 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.573434 5016 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.626781 5016 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.627058 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="400ms" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.635009 5016 manager.go:334] "Starting Device Plugin manager" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.635055 5016 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.635068 5016 server.go:79] "Starting device plugin registration server" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.635471 5016 eviction_manager.go:189] "Eviction manager: starting control loop" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.635487 5016 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.635635 5016 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.635756 5016 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 
10:34:43.635774 5016 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.642870 5016 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.736128 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.737464 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.737502 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.737511 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.737534 5016 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.738136 5016 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.53:6443: connect: connection refused" node="crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.773898 5016 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc"] Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.774117 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.775770 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.775822 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.775834 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.775980 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.776372 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.776426 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.777378 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.777412 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.777489 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.777502 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.777469 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.777635 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.777820 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.777972 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.778014 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.779211 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.779238 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.779247 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.779340 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.779370 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.779389 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.779398 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.779496 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.779530 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.780090 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.780114 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.780122 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.780205 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.780349 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.780379 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.780885 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.780907 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.780916 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.781031 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.781058 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.781084 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.781059 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.781092 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.782119 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.782148 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.782160 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.782351 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.782483 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.782582 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.846550 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.846595 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.846619 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.846639 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.846661 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.846676 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.846690 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.846705 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.846767 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.847033 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.847064 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.847092 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.847108 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.847129 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.847143 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.974453 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.974520 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.974841 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.974879 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.974904 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.974926 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.974986 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975000 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975014 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 
10:34:43.975008 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975026 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975040 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975062 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.974619 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975113 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975118 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975090 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975097 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975140 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975164 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975077 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975098 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975169 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975170 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975206 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975253 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975277 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975288 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975320 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975340 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.975468 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.976133 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.976159 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.976168 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:43 crc kubenswrapper[5016]: I1211 10:34:43.976189 5016 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 11 10:34:43 crc kubenswrapper[5016]: E1211 10:34:43.976499 5016 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.53:6443: connect: connection refused" node="crc"
Dec 11 10:34:44 crc kubenswrapper[5016]: E1211 10:34:44.028632 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="800ms"
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.096808 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.110478 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.117589 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 11 10:34:44 crc kubenswrapper[5016]: W1211 10:34:44.121644 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-b87628a82a59e1db96b5ebdd2e063c5238b207d2a07d8753ac8cad249bed8bc5 WatchSource:0}: Error finding container b87628a82a59e1db96b5ebdd2e063c5238b207d2a07d8753ac8cad249bed8bc5: Status 404 returned error can't find the container with id b87628a82a59e1db96b5ebdd2e063c5238b207d2a07d8753ac8cad249bed8bc5
Dec 11 10:34:44 crc kubenswrapper[5016]: W1211 10:34:44.133283 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-1a5cb61c3030945265be98add10951b6c88be0b837e721c1c48157abb3a73e9c WatchSource:0}: Error finding container 1a5cb61c3030945265be98add10951b6c88be0b837e721c1c48157abb3a73e9c: Status 404 returned error can't find the container with id 1a5cb61c3030945265be98add10951b6c88be0b837e721c1c48157abb3a73e9c
Dec 11 10:34:44 crc kubenswrapper[5016]: W1211 10:34:44.134436 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-764929522f6e9271dd4254b8081fbe00624803bddc83ffefdb9325c8b24531e3 WatchSource:0}: Error finding container 764929522f6e9271dd4254b8081fbe00624803bddc83ffefdb9325c8b24531e3: Status 404 returned error can't find the container with id 764929522f6e9271dd4254b8081fbe00624803bddc83ffefdb9325c8b24531e3
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.137108 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.144374 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:34:44 crc kubenswrapper[5016]: W1211 10:34:44.155305 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-f54e2c50dec3d1c49a6847a28492ae8881917108c9d4155fd8336c3acf8473bb WatchSource:0}: Error finding container f54e2c50dec3d1c49a6847a28492ae8881917108c9d4155fd8336c3acf8473bb: Status 404 returned error can't find the container with id f54e2c50dec3d1c49a6847a28492ae8881917108c9d4155fd8336c3acf8473bb
Dec 11 10:34:44 crc kubenswrapper[5016]: W1211 10:34:44.169377 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-386fa254808a237f7e3e8114a5805e08f6f07b869a95aff97b66c38528430a94 WatchSource:0}: Error finding container 386fa254808a237f7e3e8114a5805e08f6f07b869a95aff97b66c38528430a94: Status 404 returned error can't find the container with id 386fa254808a237f7e3e8114a5805e08f6f07b869a95aff97b66c38528430a94
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.377463 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.378514 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.378551 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.378564 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.378588 5016 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 11 10:34:44 crc kubenswrapper[5016]: E1211 10:34:44.379338 5016 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.53:6443: connect: connection refused" node="crc"
Dec 11 10:34:44 crc kubenswrapper[5016]: W1211 10:34:44.379454 5016 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Dec 11 10:34:44 crc kubenswrapper[5016]: E1211 10:34:44.379553 5016 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError"
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.422355 5016 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.484976 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f54e2c50dec3d1c49a6847a28492ae8881917108c9d4155fd8336c3acf8473bb"}
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.486955 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"764929522f6e9271dd4254b8081fbe00624803bddc83ffefdb9325c8b24531e3"}
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.488739 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"1a5cb61c3030945265be98add10951b6c88be0b837e721c1c48157abb3a73e9c"}
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.490026 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b87628a82a59e1db96b5ebdd2e063c5238b207d2a07d8753ac8cad249bed8bc5"}
Dec 11 10:34:44 crc kubenswrapper[5016]: I1211 10:34:44.493541 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"386fa254808a237f7e3e8114a5805e08f6f07b869a95aff97b66c38528430a94"}
Dec 11 10:34:44 crc kubenswrapper[5016]: W1211 10:34:44.507980 5016 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Dec 11 10:34:44 crc kubenswrapper[5016]: E1211 10:34:44.508062 5016 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError"
Dec 11 10:34:44 crc kubenswrapper[5016]: E1211 10:34:44.831498 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="1.6s"
Dec 11 10:34:44 crc kubenswrapper[5016]: W1211 10:34:44.926018 5016 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
Dec 11 10:34:44 crc kubenswrapper[5016]: E1211 10:34:44.926094 5016 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError"
Dec 11 10:34:45 crc kubenswrapper[5016]: W1211 10:34:44.999919 5016 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused
err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.180139 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.182163 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.182202 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.182210 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.182259 5016 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 11 10:34:45 crc kubenswrapper[5016]: E1211 10:34:45.182838 5016 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.53:6443: connect: connection refused" node="crc" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.377555 5016 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Dec 11 10:34:45 crc kubenswrapper[5016]: E1211 10:34:45.378637 5016 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.420875 5016 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.499787 5016 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0" exitCode=0 Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.499898 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0"} Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.499910 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.500877 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.500896 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.500905 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 
10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.503287 5016 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="986014f1f0751c2672711ddcf81e26b7b58f9c6a4fcd598966beb39cd8077c10" exitCode=0 Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.503357 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"986014f1f0751c2672711ddcf81e26b7b58f9c6a4fcd598966beb39cd8077c10"} Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.503407 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.509530 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.509569 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.509586 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.510975 5016 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="167bc18f623152b89836e139bd1f6eb00cd0754eb216c46d277dd0ce1f30c37d" exitCode=0 Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.511032 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"167bc18f623152b89836e139bd1f6eb00cd0754eb216c46d277dd0ce1f30c37d"} Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.511071 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.511847 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.511879 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.511890 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.515857 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ab0cfe1d8ecd790f0d9aae8e53944e730b397e1fd8409e5a3ef5ab590f956596"} Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.515916 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d2d2316a570824689ae8b4652bce6589ad8d06861d7bdfddd68e02452bb9f10d"} Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.515931 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2"} Dec 11 10:34:45 crc kubenswrapper[5016]: 
I1211 10:34:45.517464 5016 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e" exitCode=0 Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.517501 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e"} Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.517571 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.518639 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.518680 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.518692 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.524867 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.525563 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.525617 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:45 crc kubenswrapper[5016]: I1211 10:34:45.525631 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:46 crc kubenswrapper[5016]: W1211 10:34:46.226330 5016 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Dec 11 10:34:46 crc kubenswrapper[5016]: E1211 10:34:46.226419 5016 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.421375 5016 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Dec 11 10:34:46 crc kubenswrapper[5016]: E1211 10:34:46.436460 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="3.2s" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.521583 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498"} Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.523743 5016 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc" exitCode=0 Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.523809 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc"} Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.523972 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.525419 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.525448 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.525460 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.526532 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.526584 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"c58056e87b369146dfa0e106e239233504e3106e8f7b0fcedfb6f7e279b9bbc7"} Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.527441 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.527462 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.527474 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.529001 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"d6120f501cfb5766a14ea5a8049da2760a8026eb34424665976d8e31d6d949d3"} Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.529038 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b15f212dd195c7099bd34e79971c3035dddf57f33d71fd44bbf71b1e067a9a7b"} Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.532818 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1"} Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.532965 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:46 
crc kubenswrapper[5016]: I1211 10:34:46.534042 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.534082 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.534094 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:46 crc kubenswrapper[5016]: W1211 10:34:46.580731 5016 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Dec 11 10:34:46 crc kubenswrapper[5016]: E1211 10:34:46.580808 5016 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError" Dec 11 10:34:46 crc kubenswrapper[5016]: W1211 10:34:46.657291 5016 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Dec 11 10:34:46 crc kubenswrapper[5016]: E1211 10:34:46.657413 5016 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.784920 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.786545 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.786582 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.786591 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:46 crc kubenswrapper[5016]: I1211 10:34:46.786613 5016 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 11 10:34:46 crc kubenswrapper[5016]: E1211 10:34:46.787041 5016 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.53:6443: connect: connection refused" node="crc" Dec 11 10:34:46 crc kubenswrapper[5016]: W1211 10:34:46.943981 5016 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Dec 11 10:34:46 crc kubenswrapper[5016]: E1211 10:34:46.944451 5016 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to 
list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.53:6443: connect: connection refused" logger="UnhandledError" Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.421479 5016 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.53:6443: connect: connection refused Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.538781 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351"} Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.538838 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.538844 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4"} Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.538974 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43"} Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.538989 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166"} Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.539893 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.539927 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.539971 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.541145 5016 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5" exitCode=0 Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.541335 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.541793 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5"} Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.542147 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.542178 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:34:47 crc 
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.542191 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.544865 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"22fd974921305e1009cd5ac5bf0ef62bf92aa964d7e82ac544df7ca710bed84b"}
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.544981 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.545137 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.545483 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.547554 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.547666 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.547753 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.547554 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.547887 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.547900 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.547601 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.547961 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.547969 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:47 crc kubenswrapper[5016]: I1211 10:34:47.818667 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.278027 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.554141 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.554157 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.554214 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.554658 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d88a904ce0e220e5000a352352484e0aeac0dda15115ca38f6654ede993a6a5d"}
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.554730 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"21ac855626db2f18c17bf52510532154bf818297876b48f22f4b9bcb23201038"}
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.554746 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"67f3620eb07a4726b9869bb3a235b066ca330e4ec8d5a4ae6fe3a5236b70f4d1"}
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.554973 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.555061 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.555491 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.555517 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.555527 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.555540 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.555565 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.555576 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.555589 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.555604 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:48 crc kubenswrapper[5016]: I1211 10:34:48.555591 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.230435 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.500396 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.530253 5016 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.562925 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4915d3fe61301ca89c16f7f8b63cdd7b618d6b2096efd9834c0ef09396b5da9a"}
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.563037 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.563071 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"7a34a2d537d5a93deb3f35d272c93e721690126ff5fcf82f28246a0e582f32ab"}
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.563030 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.563199 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.563785 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.564279 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.564317 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.564330 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.564438 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.564475 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.564490 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.564779 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.564831 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.564845 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.570923 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.570992 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.571009 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.987153 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.988388 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.988500 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.988582 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:49 crc kubenswrapper[5016]: I1211 10:34:49.988701 5016 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 11 10:34:50 crc kubenswrapper[5016]: I1211 10:34:50.565714 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:50 crc kubenswrapper[5016]: I1211 10:34:50.565729 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:50 crc kubenswrapper[5016]: I1211 10:34:50.566676 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:50 crc kubenswrapper[5016]: I1211 10:34:50.566709 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:50 crc kubenswrapper[5016]: I1211 10:34:50.566732 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:50 crc kubenswrapper[5016]: I1211 10:34:50.566714 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:50 crc kubenswrapper[5016]: I1211 10:34:50.566742 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:50 crc kubenswrapper[5016]: I1211 10:34:50.566749 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:52 crc kubenswrapper[5016]: I1211 10:34:52.337480 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:34:52 crc kubenswrapper[5016]: I1211 10:34:52.338253 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:52 crc kubenswrapper[5016]: I1211 10:34:52.339231 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:52 crc kubenswrapper[5016]: I1211 10:34:52.339263 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:52 crc kubenswrapper[5016]: I1211 10:34:52.339272 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:52 crc kubenswrapper[5016]: I1211 10:34:52.912416 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Dec 11 10:34:52 crc kubenswrapper[5016]: I1211 10:34:52.912578 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:52 crc kubenswrapper[5016]: I1211 10:34:52.913814 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:52 crc kubenswrapper[5016]: I1211 10:34:52.913847 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:52 crc kubenswrapper[5016]: I1211 10:34:52.913856 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:53 crc kubenswrapper[5016]: E1211 10:34:53.643622 5016 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Dec 11 10:34:55 crc kubenswrapper[5016]: I1211 10:34:55.338230 5016 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 10:34:55 crc kubenswrapper[5016]: I1211 10:34:55.338299 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:34:55 crc kubenswrapper[5016]: I1211 10:34:55.370731 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Dec 11 10:34:55 crc kubenswrapper[5016]: I1211 10:34:55.370917 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:55 crc kubenswrapper[5016]: I1211 10:34:55.372096 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:55 crc kubenswrapper[5016]: I1211 10:34:55.372128 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:55 crc kubenswrapper[5016]: I1211 10:34:55.372137 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:56 crc kubenswrapper[5016]: I1211 10:34:56.360738 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:34:56 crc kubenswrapper[5016]: I1211 10:34:56.360912 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:56 crc kubenswrapper[5016]: I1211 10:34:56.362067 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:56 crc kubenswrapper[5016]: I1211 10:34:56.362101 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:56 crc kubenswrapper[5016]: I1211 10:34:56.362113 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:56 crc kubenswrapper[5016]: I1211 10:34:56.365955 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:34:56 crc kubenswrapper[5016]: I1211 10:34:56.579581 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:56 crc kubenswrapper[5016]: I1211 10:34:56.580927 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:56 crc kubenswrapper[5016]: I1211 10:34:56.581054 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:56 crc kubenswrapper[5016]: I1211 10:34:56.581067 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:56 crc kubenswrapper[5016]: I1211 10:34:56.584099 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:34:57 crc kubenswrapper[5016]: I1211 10:34:57.581712 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:34:57 crc kubenswrapper[5016]: I1211 10:34:57.582534 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:34:57 crc kubenswrapper[5016]: I1211 10:34:57.582594 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:34:57 crc kubenswrapper[5016]: I1211 10:34:57.582607 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:34:57 crc kubenswrapper[5016]: I1211 10:34:57.818767 5016 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 10:34:57 crc kubenswrapper[5016]: I1211 10:34:57.818877 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:34:58 crc kubenswrapper[5016]: I1211 10:34:58.023143 5016 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Dec 11 10:34:58 crc kubenswrapper[5016]: I1211 10:34:58.023204 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Dec 11 10:35:02 crc kubenswrapper[5016]: I1211 10:35:02.824590 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:35:02 crc kubenswrapper[5016]: I1211 10:35:02.825125 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:35:02 crc kubenswrapper[5016]: I1211 10:35:02.826534 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:02 crc kubenswrapper[5016]: I1211 10:35:02.826588 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:02 crc kubenswrapper[5016]: I1211 10:35:02.826601 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:02 crc kubenswrapper[5016]: I1211 10:35:02.830599 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:35:03 crc kubenswrapper[5016]: E1211 10:35:03.009027 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Dec 11 10:35:04 crc kubenswrapper[5016]: E1211 10:35:04.252902 5016 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.253116 5016 trace.go:236] Trace[1028601678]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Dec-2025 10:34:49.881) (total time: 14371ms):
Dec 11 10:35:04 crc kubenswrapper[5016]: Trace[1028601678]: ---"Objects listed" error: 14371ms (10:35:04.253)
Dec 11 10:35:04 crc kubenswrapper[5016]: Trace[1028601678]: [14.371405168s] [14.371405168s] END
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.253139 5016 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.253537 5016 trace.go:236] Trace[1670847521]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Dec-2025 10:34:50.274) (total time: 13978ms):
Dec 11 10:35:04 crc kubenswrapper[5016]: Trace[1670847521]: ---"Objects listed" error: 13978ms (10:35:04.253)
Dec 11 10:35:04 crc kubenswrapper[5016]: Trace[1670847521]: [13.978886487s] [13.978886487s] END
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.253559 5016 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.270809 5016 trace.go:236] Trace[172329182]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Dec-2025 10:34:52.240) (total time: 12029ms):
Dec 11 10:35:04 crc kubenswrapper[5016]: Trace[172329182]: ---"Objects listed" error: 12029ms (10:35:04.270)
Dec 11 10:35:04 crc kubenswrapper[5016]: Trace[172329182]: [12.029809206s] [12.029809206s] END
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.270848 5016 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.270925 5016 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.271119 5016 trace.go:236] Trace[763798937]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Dec-2025 10:34:50.634) (total time: 13636ms):
Dec 11 10:35:04 crc kubenswrapper[5016]: Trace[763798937]: ---"Objects listed" error: 13636ms (10:35:04.270)
Dec 11 10:35:04 crc kubenswrapper[5016]: Trace[763798937]: [13.636625947s] [13.636625947s] END
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.271135 5016 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.271156 5016 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.271824 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.271863 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.271875 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.276161 5016 kubelet_node_status.go:115] "Node was previously registered" node="crc"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.276422 5016 kubelet_node_status.go:79] "Successfully registered node" node="crc"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.277961 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.278003 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.278014 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.278029 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.278041 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:04Z","lastTransitionTime":"2025-12-11T10:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.282319 5016 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.284128 5016 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:50590->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.284183 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:50590->192.168.126.11:17697: read: connection reset by peer"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.284131 5016 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:50606->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.284270 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:50606->192.168.126.11:17697: read: connection reset by peer"
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.290274 5016 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.290371 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Dec 11 10:35:04 crc kubenswrapper[5016]: E1211 10:35:04.296044 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a4f30830-8bfd-48d5-bda8-8a5b9692bf6a\\\",\\\"systemUUID\\\":\\\"e1044399-4f18-4ebb-9d7a-d6302ff4a7fe\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.302253 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.302293 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.302304 5016 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.302320 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.302331 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:04Z","lastTransitionTime":"2025-12-11T10:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:04 crc kubenswrapper[5016]: E1211 10:35:04.314876 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a4f30830-8bfd-48d5-bda8-8a5b9692bf6a\\\",\\\"systemUUID\\\":\\\"e1044399-4f18-4ebb-9d7a-d6302ff4a7fe\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.318542 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.318655 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.318732 5016 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.318797 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.318862 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:04Z","lastTransitionTime":"2025-12-11T10:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:04 crc kubenswrapper[5016]: E1211 10:35:04.327461 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a4f30830-8bfd-48d5-bda8-8a5b9692bf6a\\\",\\\"systemUUID\\\":\\\"e1044399-4f18-4ebb-9d7a-d6302ff4a7fe\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.330697 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.330734 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.330742 5016 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.330759 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.330769 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:04Z","lastTransitionTime":"2025-12-11T10:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:04 crc kubenswrapper[5016]: E1211 10:35:04.340195 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a4f30830-8bfd-48d5-bda8-8a5b9692bf6a\\\",\\\"systemUUID\\\":\\\"e1044399-4f18-4ebb-9d7a-d6302ff4a7fe\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.342750 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.342776 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.342785 5016 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.342796 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.342805 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:04Z","lastTransitionTime":"2025-12-11T10:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:04 crc kubenswrapper[5016]: E1211 10:35:04.350374 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a4f30830-8bfd-48d5-bda8-8a5b9692bf6a\\\",\\\"systemUUID\\\":\\\"e1044399-4f18-4ebb-9d7a-d6302ff4a7fe\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:04 crc kubenswrapper[5016]: E1211 10:35:04.350643 5016 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.352092 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.352185 5016 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.352252 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.352320 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.352392 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:04Z","lastTransitionTime":"2025-12-11T10:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.454894 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.454949 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.454961 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.454976 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.454987 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:04Z","lastTransitionTime":"2025-12-11T10:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.557004 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.557061 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.557070 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.557086 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.557097 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:04Z","lastTransitionTime":"2025-12-11T10:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.659619 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.659649 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.659657 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.659670 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.659681 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:04Z","lastTransitionTime":"2025-12-11T10:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.762476 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.762520 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.762530 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.762544 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.762554 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:04Z","lastTransitionTime":"2025-12-11T10:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.782541 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.787146 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.865157 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.865189 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.865197 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.865210 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.865219 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:04Z","lastTransitionTime":"2025-12-11T10:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.968174 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.968217 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.968225 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.968247 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:04 crc kubenswrapper[5016]: I1211 10:35:04.968257 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:04Z","lastTransitionTime":"2025-12-11T10:35:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.071057 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.071092 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.071101 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.071114 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.071123 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:05Z","lastTransitionTime":"2025-12-11T10:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.175473 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.175524 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.175533 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.175550 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.175564 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:05Z","lastTransitionTime":"2025-12-11T10:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.254438 5016 apiserver.go:52] "Watching apiserver" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.257565 5016 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.260795 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c"] Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.261985 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.262275 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.262560 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.262597 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.262591 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.262628 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.262680 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.262759 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.263779 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.264737 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.264758 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.265197 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.267190 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.267415 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.267476 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.267480 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.267826 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.268224 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.274205 5016 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.274266 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.277595 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.277647 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.277667 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.277900 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.277932 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:05Z","lastTransitionTime":"2025-12-11T10:35:05Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.284015 5016 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.293003 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.303618 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.312912 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.322748 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.326624 5016 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.333554 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.342713 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.350050 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.357353 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10a14504-cb78-4a73-96dd-5fe8640132aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2d2316a570824689ae8b4652bce6589ad8d06861d7bdfddd68e02452bb9f10d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0cfe1d8ecd790f0d9aae8e53944e730b397e1fd8409e5a3ef5ab590f956596\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.366303 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"071ea1a0-65ea-49d7-a4b1-0f8a312c0112\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resourc
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.375646 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.377213 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.377305 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.377339 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.377370 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.377408 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.377626 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.377670 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.377702 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.377727 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.377668 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.377736 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.377921 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:05.877891821 +0000 UTC m=+22.696451610 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.377751 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378092 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378164 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378198 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378203 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378227 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378233 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378255 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378389 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378428 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378414 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378462 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378493 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378482 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378523 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378553 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378581 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378606 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378637 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378661 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378689 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378712 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378737 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378762 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378788 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378852 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378866 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378881 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.378981 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379050 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379074 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379098 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379124 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: 
\"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379122 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379148 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379175 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379200 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379220 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379225 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379286 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379321 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379362 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379389 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379388 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379416 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379437 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379452 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379485 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379517 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379549 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379540 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379766 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379796 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379826 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379914 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.379048 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.381908 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.381931 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382022 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382079 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382121 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382127 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382242 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382310 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382355 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382399 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382434 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382476 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382515 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382555 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382588 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: 
\"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382625 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382601 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382722 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382741 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382758 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.383975 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384031 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384598 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384661 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384706 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384753 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384769 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384809 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384829 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384853 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384877 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: 
\"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384868 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:05Z","lastTransitionTime":"2025-12-11T10:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384927 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385010 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385074 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385114 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385148 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385177 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385209 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385439 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385567 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: 
\"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385611 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385643 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385671 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385692 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385724 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385776 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385805 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385833 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385862 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385890 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385924 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385967 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385990 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.386010 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.386032 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.386055 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.386078 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.386102 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.386128 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.386154 5016 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.383790 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.388597 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382877 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.382917 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.383011 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.383318 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.383331 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.383386 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.383582 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.383559 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.383825 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.383879 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.383996 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384174 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384176 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384245 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384300 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.384686 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385173 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385388 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385566 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385674 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.385675 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.386033 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.386053 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.386991 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.387095 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.387216 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.387270 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.387545 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.387689 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.387696 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.388167 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.388199 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.388250 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.389159 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.388419 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.388525 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.389271 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.389361 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.389418 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.389532 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390056 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390105 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390126 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390199 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390227 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390262 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390282 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390312 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390349 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390373 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390405 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390467 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390605 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.390619 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.391337 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.392014 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.392982 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.392990 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.388810 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393053 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393097 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393121 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393141 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393167 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393191 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393214 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393234 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393263 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393291 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.388924 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.394114 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.388285 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.394964 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393058 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.388778 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393107 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393147 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393135 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393184 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393264 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393653 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393840 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.395094 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.395142 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.393955 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.395457 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.395523 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.395570 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.395722 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396129 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396185 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396198 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396204 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396340 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396477 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396494 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396505 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396521 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396532 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396492 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396674 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396794 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396874 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396464 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396877 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.396890 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397051 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397129 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397248 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397271 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397320 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397370 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397376 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397480 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397523 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397557 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397579 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397601 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397704 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397753 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397723 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397781 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397818 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397782 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397903 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.397993 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398021 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398025 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398054 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398069 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398084 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398114 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398143 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398167 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398176 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398193 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398220 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398246 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398279 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398308 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398340 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398365 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398395 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398423 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398452 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: 
\"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398477 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398500 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398525 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398578 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398600 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398625 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398649 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398676 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398690 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398700 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398760 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398788 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398819 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398844 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398869 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398894 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398919 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398967 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.398991 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399021 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399048 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399075 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399103 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399134 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399164 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399196 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399237 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399245 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399266 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399296 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399325 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399352 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399380 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399416 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399443 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399472 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399498 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399508 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod 
"3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399556 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399604 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399523 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399743 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399775 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399821 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399851 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399896 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.399920 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400081 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400162 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400198 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400290 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400314 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 10:35:05 crc 
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400387 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400408 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400495 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400516 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400539 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400136 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400583 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400163 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400229 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400308 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400610 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400502 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400652 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400834 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.401065 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.401123 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.401003 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.401683 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.401768 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.401811 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.401870 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.402162 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.402200 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.402203 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.402285 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.402318 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.402435 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.402454 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.402632 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.402809 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.402847 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.403084 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.403158 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.403438 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.403752 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.403847 5016 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.403930 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.400604 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404032 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404078 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404082 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404126 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404257 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404284 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404260 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404319 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404377 5016 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404398 5016 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404417 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404433 5016 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404451 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404466 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404482 5016 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404499 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404563 5016 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404583 5016 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404599 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404674 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc 
kubenswrapper[5016]: I1211 10:35:05.404692 5016 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404707 5016 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404721 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404735 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404833 5016 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404871 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.404899 5016 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.404932 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.405246 5016 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.405327 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.405362 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.405449 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.405340 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.405357 5016 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.405773 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.405789 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.405788 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.405820 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.405891 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:05.905870525 +0000 UTC m=+22.724430294 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406048 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406087 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406176 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406196 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406211 5016 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406227 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406242 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406260 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406278 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406301 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406339 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406353 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406367 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406379 5016 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406391 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406405 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406419 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406431 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406442 5016 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406452 5016 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406462 5016 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406471 5016 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406481 5016 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406491 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406504 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406514 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406524 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406533 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406543 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406555 5016 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406566 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406576 5016 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406587 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406596 5016 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406607 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406616 5016 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
\"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406626 5016 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406636 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406646 5016 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406655 5016 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406699 5016 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406710 5016 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406719 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406761 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406769 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406778 5016 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406799 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406809 5016 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406818 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406828 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406837 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406846 5016 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406855 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406866 5016 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406875 5016 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406884 5016 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406893 5016 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406904 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406912 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406921 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.406930 5016 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407023 5016 
reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407033 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407043 5016 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407052 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407066 5016 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407076 5016 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407085 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407094 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407106 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407117 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407127 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407139 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407150 5016 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath 
\"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407159 5016 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407169 5016 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407179 5016 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407191 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407204 5016 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407213 5016 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407222 5016 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407231 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407240 5016 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407250 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407262 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407272 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407282 5016 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: 
\"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407291 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407301 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407310 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407319 5016 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407329 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407339 5016 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407348 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407358 5016 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407367 5016 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407377 5016 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407387 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407396 5016 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407406 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: 
\"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407417 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407427 5016 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407436 5016 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407445 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407455 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407464 5016 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407473 5016 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407483 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407492 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407502 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407515 5016 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407525 5016 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407534 5016 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407543 5016 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407552 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407561 5016 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407570 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407578 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407588 5016 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407597 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407606 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407615 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407625 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407634 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407644 5016 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407656 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: 
\"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407666 5016 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407675 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407684 5016 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407694 5016 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407704 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407713 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407721 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.407730 5016 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.408331 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:05.908181351 +0000 UTC m=+22.726740930 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.409054 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.409285 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.409589 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.410197 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.410185 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.410279 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.413021 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.413466 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.416660 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.416866 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.416880 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.417061 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.417062 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.417079 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.417085 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.417092 5016 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.417096 5016 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.417339 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:05.917131756 +0000 UTC m=+22.735691365 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.417365 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:05.917351882 +0000 UTC m=+22.735911671 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.419463 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.422293 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.422884 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.428150 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.478275 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.479025 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.480493 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" 
path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.481058 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.481854 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.483109 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.483655 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.484670 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.485555 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.486114 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.487239 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.487604 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.487680 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.487693 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.487718 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.487733 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:05Z","lastTransitionTime":"2025-12-11T10:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509253 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509307 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509367 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509380 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509389 5016 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509399 5016 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509407 5016 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509415 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509424 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509433 5016 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509477 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509496 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509519 5016 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509528 5016 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509537 5016 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509545 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509555 5016 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509563 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509571 5016 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509580 5016 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509591 5016 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509603 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509614 5016 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509624 5016 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509635 5016 reconciler_common.go:293] "Volume detached for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.509640 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.578533 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.589627 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.589684 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.589696 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.589715 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.589729 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:05Z","lastTransitionTime":"2025-12-11T10:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.638961 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.640125 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.643025 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.643852 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.655624 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.655662 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.655850 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.656018 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.656051 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.656577 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.657462 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.658189 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.659405 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.659902 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.660170 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.660538 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.660592 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.660822 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.661087 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.661155 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.661971 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.663409 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). 
InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.690621 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.692307 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.693508 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.693618 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.693711 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.693810 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.693884 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:05Z","lastTransitionTime":"2025-12-11T10:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.695259 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.696855 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.697885 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.699294 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.700121 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.701355 5016 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.701728 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.709018 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.711661 5016 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.711779 5016 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.711837 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.711921 5016 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.712019 5016 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.712140 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.712244 5016 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.712367 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.712516 5016 reconciler_common.go:293] "Volume 
detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.712657 5016 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.712761 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.712900 5016 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.713059 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.713166 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.713262 5016 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.713391 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.714478 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.796524 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.796568 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.796576 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.796605 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.796614 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:05Z","lastTransitionTime":"2025-12-11T10:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.814037 5016 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.887606 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.896819 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.899214 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.899253 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.899269 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.899290 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.899307 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:05Z","lastTransitionTime":"2025-12-11T10:35:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:05 crc kubenswrapper[5016]: W1211 10:35:05.907370 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-ff712f6d4ec6316bbee774e6135af5fa2c813d5b393414ece9431508c1620739 WatchSource:0}: Error finding container ff712f6d4ec6316bbee774e6135af5fa2c813d5b393414ece9431508c1620739: Status 404 returned error can't find the container with id ff712f6d4ec6316bbee774e6135af5fa2c813d5b393414ece9431508c1620739 Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.914924 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.915050 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:06.915025888 +0000 UTC m=+23.733585467 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.915095 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:05 crc kubenswrapper[5016]: I1211 10:35:05.915136 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.915239 5016 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.915314 5016 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.915316 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-12-11 10:35:06.915298244 +0000 UTC m=+23.733857863 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 10:35:05 crc kubenswrapper[5016]: E1211 10:35:05.915408 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:06.915355086 +0000 UTC m=+23.733914775 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.001488 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.001545 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.001567 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.001625 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.001640 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:06Z","lastTransitionTime":"2025-12-11T10:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.015826 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.015900 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.016078 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.016093 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.016103 5016 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.016141 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.016174 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:07.016160546 +0000 UTC m=+23.834720125 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.016176 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.016195 5016 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.016280 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:07.016257588 +0000 UTC m=+23.834817227 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.110655 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.110695 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.110704 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.110719 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.110729 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:06Z","lastTransitionTime":"2025-12-11T10:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.195753 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.196417 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.197001 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.198536 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.200862 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.201579 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.202867 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.203964 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.205059 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.205767 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.207033 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.208255 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.208858 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.209534 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Dec 11 10:35:06 
crc kubenswrapper[5016]: I1211 10:35:06.210595 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.211628 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.214471 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.214496 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.214506 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.214522 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.214534 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:06Z","lastTransitionTime":"2025-12-11T10:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.215074 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.215773 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.216591 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.232652 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.233721 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.235924 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.237525 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.268778 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.269173 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.275229 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.278488 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.280819 5016 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351" exitCode=255 Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.280906 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.281479 5016 scope.go:117] "RemoveContainer" containerID="f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.282126 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" 
event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"ff712f6d4ec6316bbee774e6135af5fa2c813d5b393414ece9431508c1620739"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.282429 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.283247 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"6be6db7daf8ba24998fdd5d2648517efc2da26ac6ae3a6d65737ab6a24202058"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.284331 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"e6523961ec970fc172f8948d347fdb855c5d1aa3af42aada639f1fa9c99863a6"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.294217 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.305008 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"10a14504-cb78-4a73-96dd-5fe8640132aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2d2316a570824689ae8b4652bce6589ad8d06861d7bdfddd68e02452bb9f10d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0cfe1d8ecd790f0d9aae8e53944e730b397e1fd8409e5a3ef5ab590f956596\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.317461 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.317527 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.317536 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.317556 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.317569 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:06Z","lastTransitionTime":"2025-12-11T10:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.320812 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"071ea1a0-65ea-49d7-a4b1-0f8a312c0112\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.335884 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.351145 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.363631 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.382061 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5610bfa4-13d2-4186-a9e2-f18f8714d039\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21ac855626db2f18c17bf52510532154bf818297876b48f22f4b9bcb23201038\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d88a904ce0e220e5000a352352484e0aeac0dda15115ca38f6654ede993a6a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a34a2d537d5a93deb3f35d272c93e721690126ff5fcf82f28246a0e582f32ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4915d3fe61301ca89c16f7f8b63cdd7b618d6b2
096efd9834c0ef09396b5da9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67f3620eb07a4726b9869bb3a235b066ca330e4ec8d5a4ae6fe3a5236b70f4d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.392711 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.402185 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.411575 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10a14504-cb78-4a73-96dd-5fe8640132aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2d2316a570824689ae8b4652bce6589ad8d06861d7bdfddd68e02452bb9f10d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\
"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0cfe1d8ecd790f0d9aae8e53944e730b397e1fd8409e5a3ef5ab590f956596\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.419553 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.419609 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.419621 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.419637 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.419647 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:06Z","lastTransitionTime":"2025-12-11T10:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.424034 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"071ea1a0-65ea-49d7-a4b1-0f8a312c0112\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1211 10:34:57.908524 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 10:34:57.912494 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2684440780/tls.crt::/tmp/serving-cert-2684440780/tls.key\\\\\\\"\\\\nI1211 10:35:04.252522 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1211 10:35:04.257124 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1211 10:35:04.257226 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1211 10:35:04.257293 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1211 10:35:04.257339 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1211 10:35:04.268814 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1211 10:35:04.268871 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268878 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1211 10:35:04.268889 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1211 10:35:04.268893 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1211 10:35:04.268897 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1211 10:35:04.269301 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1211 10:35:04.273272 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.437317 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.450558 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.459993 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.468078 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.473497 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.473564 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.473632 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.473678 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.521557 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.521611 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.521624 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.521646 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.521657 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:06Z","lastTransitionTime":"2025-12-11T10:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.624829 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.624885 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.624899 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.624916 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.624930 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:06Z","lastTransitionTime":"2025-12-11T10:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.726640 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.726673 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.726682 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.726696 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.726705 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:06Z","lastTransitionTime":"2025-12-11T10:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.828907 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.828967 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.828979 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.828997 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.829009 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:06Z","lastTransitionTime":"2025-12-11T10:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.923926 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.924631 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:08.924591453 +0000 UTC m=+25.743151072 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.924869 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.925115 5016 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.925117 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.925218 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:08.925190177 +0000 UTC m=+25.743749796 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.925442 5016 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 10:35:06 crc kubenswrapper[5016]: E1211 10:35:06.925613 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:08.925595176 +0000 UTC m=+25.744154785 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.931931 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.931982 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.931991 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.932004 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:06 crc kubenswrapper[5016]: I1211 10:35:06.932012 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:06Z","lastTransitionTime":"2025-12-11T10:35:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.025638 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.025772 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:07 crc kubenswrapper[5016]: E1211 10:35:07.025676 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 10:35:07 crc kubenswrapper[5016]: E1211 10:35:07.025827 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 10:35:07 crc kubenswrapper[5016]: E1211 10:35:07.025847 5016 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:07 crc kubenswrapper[5016]: E1211 10:35:07.025895 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:09.025881344 +0000 UTC m=+25.844440923 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:07 crc kubenswrapper[5016]: E1211 10:35:07.025927 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 10:35:07 crc kubenswrapper[5016]: E1211 10:35:07.025975 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 10:35:07 crc kubenswrapper[5016]: E1211 10:35:07.025988 5016 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:07 crc kubenswrapper[5016]: E1211 10:35:07.026045 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:09.026027547 +0000 UTC m=+25.844587186 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.034244 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.034271 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.034278 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.034290 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.034300 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:07Z","lastTransitionTime":"2025-12-11T10:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.137695 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.137747 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.137768 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.137790 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.137805 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:07Z","lastTransitionTime":"2025-12-11T10:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.240446 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.240487 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.240496 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.240509 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.240517 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:07Z","lastTransitionTime":"2025-12-11T10:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:07 crc kubenswrapper[5016]: E1211 10:35:07.299058 5016 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"etcd-crc\" already exists" pod="openshift-etcd/etcd-crc"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.343086 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.343126 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.343136 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.343151 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.343163 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:07Z","lastTransitionTime":"2025-12-11T10:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.445865 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.445901 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.445910 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.445925 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.445953 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:07Z","lastTransitionTime":"2025-12-11T10:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.474457 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:35:07 crc kubenswrapper[5016]: E1211 10:35:07.474671 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.478197 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.479093 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.480119 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.480733 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.481755 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.482287 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.482809 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.483915 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.557230 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.557264 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.557273 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.557290 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.557302 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:07Z","lastTransitionTime":"2025-12-11T10:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.659065 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.659108 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.659119 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.659136 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.659149 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:07Z","lastTransitionTime":"2025-12-11T10:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.761426 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.761468 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.761479 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.761497 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.761515 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:07Z","lastTransitionTime":"2025-12-11T10:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.863891 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.863921 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.863929 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.863963 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.863975 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:07Z","lastTransitionTime":"2025-12-11T10:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.966638 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.966687 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.966700 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.966717 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:07 crc kubenswrapper[5016]: I1211 10:35:07.966728 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:07Z","lastTransitionTime":"2025-12-11T10:35:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.000418 5016 csr.go:261] certificate signing request csr-nmpgv is approved, waiting to be issued
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.069046 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.069080 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.069087 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.069100 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.069112 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:08Z","lastTransitionTime":"2025-12-11T10:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.171522 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.171558 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.171568 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.171584 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.171596 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:08Z","lastTransitionTime":"2025-12-11T10:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.274563 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.274633 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.274652 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.274676 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.274692 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:08Z","lastTransitionTime":"2025-12-11T10:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.291891 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"f0a285d2cbeb8195ae60cf34839e1f66992e57742406574b0a00de9d52e8a4f9"}
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.377545 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.377586 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.377597 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.377613 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.377624 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:08Z","lastTransitionTime":"2025-12-11T10:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.474264 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.474271 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:35:08 crc kubenswrapper[5016]: E1211 10:35:08.474431 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 10:35:08 crc kubenswrapper[5016]: E1211 10:35:08.474574 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.480392 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.480418 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.480425 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.480436 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.480447 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:08Z","lastTransitionTime":"2025-12-11T10:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.582912 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.582979 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.582989 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.583015 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.583034 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:08Z","lastTransitionTime":"2025-12-11T10:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.685519 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.685597 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.685616 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.685645 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.685661 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:08Z","lastTransitionTime":"2025-12-11T10:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.788392 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.788449 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.788458 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.788474 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.788484 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:08Z","lastTransitionTime":"2025-12-11T10:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.891114 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.891370 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.891451 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.891520 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.891588 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:08Z","lastTransitionTime":"2025-12-11T10:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.945797 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.945878 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.945923 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:35:08 crc kubenswrapper[5016]: E1211 10:35:08.946001 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:12.945977197 +0000 UTC m=+29.764536786 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:08 crc kubenswrapper[5016]: E1211 10:35:08.946065 5016 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 11 10:35:08 crc kubenswrapper[5016]: E1211 10:35:08.946118 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:12.946110081 +0000 UTC m=+29.764669660 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 11 10:35:08 crc kubenswrapper[5016]: E1211 10:35:08.946185 5016 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Dec 11 10:35:08 crc kubenswrapper[5016]: E1211 10:35:08.946374 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:12.946331916 +0000 UTC m=+29.764891535 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.999308 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.999383 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.999403 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.999431 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:08 crc kubenswrapper[5016]: I1211 10:35:08.999454 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:08Z","lastTransitionTime":"2025-12-11T10:35:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.047372 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.047462 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:35:09 crc kubenswrapper[5016]: E1211 10:35:09.047592 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 11 10:35:09 crc kubenswrapper[5016]: E1211 10:35:09.047615 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 11 10:35:09 crc kubenswrapper[5016]: E1211 10:35:09.047630 5016 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 11 10:35:09 crc kubenswrapper[5016]: E1211 10:35:09.047668 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 11 10:35:09 crc kubenswrapper[5016]: E1211 10:35:09.047706 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 11 10:35:09 crc kubenswrapper[5016]: E1211 10:35:09.047695 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:13.047676898 +0000 UTC m=+29.866236477 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 11 10:35:09 crc kubenswrapper[5016]: E1211 10:35:09.047722 5016 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 11 10:35:09 crc kubenswrapper[5016]: E1211 10:35:09.047783 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:13.04776328 +0000 UTC m=+29.866322859 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.101837 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.101875 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.101883 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.101897 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.101907 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:09Z","lastTransitionTime":"2025-12-11T10:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.203721 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.203747 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.203755 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.203767 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.203775 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:09Z","lastTransitionTime":"2025-12-11T10:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.305782 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.305819 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.305827 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.305847 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.305866 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:09Z","lastTransitionTime":"2025-12-11T10:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.408004 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.408040 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.408049 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.408064 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.408073 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:09Z","lastTransitionTime":"2025-12-11T10:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.474314 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:35:09 crc kubenswrapper[5016]: E1211 10:35:09.474438 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.510667 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.510715 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.510723 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.510738 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.510748 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:09Z","lastTransitionTime":"2025-12-11T10:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.545083 5016 csr.go:257] certificate signing request csr-nmpgv is issued
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.613398 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.613458 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.613467 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.613482 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.613494 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:09Z","lastTransitionTime":"2025-12-11T10:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.715533 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.715572 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.715607 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.715623 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.715634 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:09Z","lastTransitionTime":"2025-12-11T10:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.818320 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.818368 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.818378 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.818394 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.818406 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:09Z","lastTransitionTime":"2025-12-11T10:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.921029 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.921069 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.921078 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.921091 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:09 crc kubenswrapper[5016]: I1211 10:35:09.921100 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:09Z","lastTransitionTime":"2025-12-11T10:35:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.023913 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.023985 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.023997 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.024015 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.024027 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:10Z","lastTransitionTime":"2025-12-11T10:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.126651 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.126726 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.126739 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.126764 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.126783 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:10Z","lastTransitionTime":"2025-12-11T10:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.229813 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.229870 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.229882 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.229985 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.230000 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:10Z","lastTransitionTime":"2025-12-11T10:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.299732 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.301983 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"31d86d5f7654802bd05a4a9c4d506deb5b9bcf5fd1ae02722093170105a93409"}
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.331679 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.331718 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.331726 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.331737 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.331745 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:10Z","lastTransitionTime":"2025-12-11T10:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.434465 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.434503 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.434512 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.434526 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.434536 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:10Z","lastTransitionTime":"2025-12-11T10:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.474097 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.474161 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 10:35:10 crc kubenswrapper[5016]: E1211 10:35:10.474219 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 10:35:10 crc kubenswrapper[5016]: E1211 10:35:10.474301 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.536872 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.536958 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.536970 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.536985 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.536995 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:10Z","lastTransitionTime":"2025-12-11T10:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.546357 5016 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-12-11 10:30:09 +0000 UTC, rotation deadline is 2026-08-30 00:14:41.780687764 +0000 UTC
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.546386 5016 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6277h39m31.234303661s for next certificate rotation
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.639318 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.639362 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.639370 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.639382 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.639404 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:10Z","lastTransitionTime":"2025-12-11T10:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.743228 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.743307 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.743316 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.743335 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.743347 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:10Z","lastTransitionTime":"2025-12-11T10:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.847844 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.847890 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.847899 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.847916 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.847926 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:10Z","lastTransitionTime":"2025-12-11T10:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.885914 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.949920 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.949972 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.949981 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.949997 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:10 crc kubenswrapper[5016]: I1211 10:35:10.950010 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:10Z","lastTransitionTime":"2025-12-11T10:35:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.051918 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.051993 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.052005 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.052020 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.052032 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:11Z","lastTransitionTime":"2025-12-11T10:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.153722 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.153751 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.153761 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.153777 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.153786 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:11Z","lastTransitionTime":"2025-12-11T10:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.256389 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.256468 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.256485 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.256515 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.256532 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:11Z","lastTransitionTime":"2025-12-11T10:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.309361 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"a2d9df7dbe3ea044109906e798c93b097bcfc634185cd997f5f8215ae885d785"}
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.358746 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.358777 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.358786 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.358799 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.358809 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:11Z","lastTransitionTime":"2025-12-11T10:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.461283 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.461778 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.461857 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.461958 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.462048 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:11Z","lastTransitionTime":"2025-12-11T10:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.474167 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:35:11 crc kubenswrapper[5016]: E1211 10:35:11.474344 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.564585 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.564611 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.564619 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.564631 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.564640 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:11Z","lastTransitionTime":"2025-12-11T10:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.666347 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.666404 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.666419 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.666446 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.666459 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:11Z","lastTransitionTime":"2025-12-11T10:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.768310 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.768402 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.768425 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.768455 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.768478 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:11Z","lastTransitionTime":"2025-12-11T10:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.854639 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-2x7t7"] Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.854988 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.857060 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.857550 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-7m8vj"] Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.857839 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.858183 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-54q52"] Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.858337 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.858684 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-54q52" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.859149 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.859363 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.859216 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.860031 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-rgpjs"] Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.860225 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-k9ssc"] Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.860318 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-rgpjs" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.860414 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.861792 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.862665 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.863300 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.863373 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.863597 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.863635 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.864046 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.864481 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.864767 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.864931 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.865105 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.866605 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.867384 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.867412 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.867474 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.867537 5016 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.867568 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.870309 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.870352 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.870359 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.870372 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.870386 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:11Z","lastTransitionTime":"2025-12-11T10:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.873834 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/e679c083-2480-4bc8-a8ea-dc2ff0412508-rootfs\") pod \"machine-config-daemon-2x7t7\" (UID: \"e679c083-2480-4bc8-a8ea-dc2ff0412508\") " pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.873897 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-run-netns\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.873930 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-multus-conf-dir\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.873994 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-run-multus-certs\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874031 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-etc-kubernetes\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874063 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-kubelet\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874097 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a73d5878-697e-4e13-924c-248fb9150c9e-cnibin\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874134 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-multus-cni-dir\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874163 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-var-lib-kubelet\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874194 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-run-netns\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874245 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-systemd\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874280 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-cni-bin\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874318 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-env-overrides\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874352 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e679c083-2480-4bc8-a8ea-dc2ff0412508-proxy-tls\") pod \"machine-config-daemon-2x7t7\" (UID: \"e679c083-2480-4bc8-a8ea-dc2ff0412508\") " pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874386 5016 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-system-cni-dir\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874421 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-ovnkube-config\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874454 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-openvswitch\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874490 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874527 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a73d5878-697e-4e13-924c-248fb9150c9e-system-cni-dir\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874561 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-etc-openvswitch\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874610 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-node-log\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874642 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-ovnkube-script-lib\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874690 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckw66\" (UniqueName: \"kubernetes.io/projected/62530621-fff3-49c0-ba0d-14d7ec144c5f-kube-api-access-ckw66\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " 
pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874721 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-systemd-units\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874753 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-hostroot\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874831 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-cnibin\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874864 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-os-release\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874888 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-var-lib-cni-bin\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.874958 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-run-ovn-kubernetes\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.875017 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5t5hc\" (UniqueName: \"kubernetes.io/projected/113e28fe-424e-491f-a50b-29ddf3e19cb8-kube-api-access-5t5hc\") pod \"node-resolver-rgpjs\" (UID: \"113e28fe-424e-491f-a50b-29ddf3e19cb8\") " pod="openshift-dns/node-resolver-rgpjs" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.875040 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-run-k8s-cni-cncf-io\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.875082 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-ovn\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.875125 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a73d5878-697e-4e13-924c-248fb9150c9e-os-release\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.875579 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-slash\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.875664 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-var-lib-openvswitch\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.875705 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-log-socket\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879089 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hw9x\" (UniqueName: \"kubernetes.io/projected/1a090784-1b4b-4c21-b425-9ea90576fc74-kube-api-access-6hw9x\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879349 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/62530621-fff3-49c0-ba0d-14d7ec144c5f-multus-daemon-config\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879605 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1a090784-1b4b-4c21-b425-9ea90576fc74-ovn-node-metrics-cert\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879648 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-var-lib-cni-multus\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879672 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/a73d5878-697e-4e13-924c-248fb9150c9e-cni-binary-copy\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879694 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a73d5878-697e-4e13-924c-248fb9150c9e-tuning-conf-dir\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879725 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e679c083-2480-4bc8-a8ea-dc2ff0412508-mcd-auth-proxy-config\") pod \"machine-config-daemon-2x7t7\" (UID: \"e679c083-2480-4bc8-a8ea-dc2ff0412508\") " pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879749 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4cmd\" (UniqueName: \"kubernetes.io/projected/e679c083-2480-4bc8-a8ea-dc2ff0412508-kube-api-access-s4cmd\") pod \"machine-config-daemon-2x7t7\" (UID: \"e679c083-2480-4bc8-a8ea-dc2ff0412508\") " pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879768 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/113e28fe-424e-491f-a50b-29ddf3e19cb8-hosts-file\") pod \"node-resolver-rgpjs\" (UID: \"113e28fe-424e-491f-a50b-29ddf3e19cb8\") " pod="openshift-dns/node-resolver-rgpjs" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879789 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-cni-netd\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879808 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf92s\" (UniqueName: \"kubernetes.io/projected/a73d5878-697e-4e13-924c-248fb9150c9e-kube-api-access-gf92s\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879828 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/62530621-fff3-49c0-ba0d-14d7ec144c5f-cni-binary-copy\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879850 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-multus-socket-dir-parent\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " 
pod="openshift-multus/multus-k9ssc" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.879919 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a73d5878-697e-4e13-924c-248fb9150c9e-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.881388 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5610bfa4-13d2-4186-a9e2-f18f8714d039\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21ac855626db2f18c17bf52510532154bf818297876b48f22f4b9bcb23201038\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d88a904ce0e220e5000a352352484e0aeac0dda15115ca38f6654ede993a6a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a34a2d537d5a93deb3f35d272c93e721690126ff5fcf82f28246a0e582f32ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa37232
69019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4915d3fe61301ca89c16f7f8b63cdd7b618d6b2096efd9834c0ef09396b5da9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67f3620eb07a4726b9869bb3a235b066ca330e4ec8d5a4ae6fe3a5236b70f4d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\
\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.894760 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.908660 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.922577 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e679c083-2480-4bc8-a8ea-dc2ff0412508\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2x7t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.935286 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.944120 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.954855 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.965423 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.975788 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.975828 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.975838 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.975853 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.975863 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:11Z","lastTransitionTime":"2025-12-11T10:35:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.978736 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10a14504-cb78-4a73-96dd-5fe8640132aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2d2316a570824689ae8b4652bce6589ad8d06861d7bdfddd68e02452bb9f10d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0cfe1d8ecd790f0d9aae8e53944e730b397e1fd8409e5a3ef5ab590f956596\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981078 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-kubelet\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981114 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a73d5878-697e-4e13-924c-248fb9150c9e-cnibin\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981139 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-run-multus-certs\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981160 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-etc-kubernetes\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981192 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-var-lib-kubelet\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981214 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-run-netns\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981241 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-multus-cni-dir\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981243 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-etc-kubernetes\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981262 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-cni-bin\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981210 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a73d5878-697e-4e13-924c-248fb9150c9e-cnibin\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981283 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-env-overrides\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981367 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-var-lib-kubelet\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981439 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-systemd\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981482 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-multus-cni-dir\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981499 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-system-cni-dir\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981290 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-run-multus-certs\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981479 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-cni-bin\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981374 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-run-netns\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981537 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-ovnkube-config\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981520 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-systemd\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981600 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-kubelet\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981649 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-system-cni-dir\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981682 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e679c083-2480-4bc8-a8ea-dc2ff0412508-proxy-tls\") pod \"machine-config-daemon-2x7t7\" (UID: \"e679c083-2480-4bc8-a8ea-dc2ff0412508\") " pod="openshift-machine-config-operator/machine-config-daemon-2x7t7"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981763 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-etc-openvswitch\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981887 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-openvswitch\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981916 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981949 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a73d5878-697e-4e13-924c-248fb9150c9e-system-cni-dir\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981925 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-env-overrides\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981968 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-node-log\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981975 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-etc-openvswitch\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981999 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-node-log\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982003 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-ovnkube-script-lib\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982041 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a73d5878-697e-4e13-924c-248fb9150c9e-system-cni-dir\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982045 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-hostroot\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.981960 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-openvswitch\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982006 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982069 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckw66\" (UniqueName: \"kubernetes.io/projected/62530621-fff3-49c0-ba0d-14d7ec144c5f-kube-api-access-ckw66\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982082 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-hostroot\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982091 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-systemd-units\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982111 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-run-ovn-kubernetes\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982138 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-cnibin\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982157 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-os-release\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982181 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-var-lib-cni-bin\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982204 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-ovn\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982225 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a73d5878-697e-4e13-924c-248fb9150c9e-os-release\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982228 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-ovnkube-config\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982249 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5t5hc\" (UniqueName: \"kubernetes.io/projected/113e28fe-424e-491f-a50b-29ddf3e19cb8-kube-api-access-5t5hc\") pod \"node-resolver-rgpjs\" (UID: \"113e28fe-424e-491f-a50b-29ddf3e19cb8\") " pod="openshift-dns/node-resolver-rgpjs"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982272 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-run-k8s-cni-cncf-io\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982290 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-os-release\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982294 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hw9x\" (UniqueName: \"kubernetes.io/projected/1a090784-1b4b-4c21-b425-9ea90576fc74-kube-api-access-6hw9x\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982314 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-systemd-units\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982319 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-slash\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982338 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-run-ovn-kubernetes\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982342 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-var-lib-openvswitch\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982363 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-log-socket\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982374 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a73d5878-697e-4e13-924c-248fb9150c9e-os-release\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982407 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/62530621-fff3-49c0-ba0d-14d7ec144c5f-multus-daemon-config\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982433 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1a090784-1b4b-4c21-b425-9ea90576fc74-ovn-node-metrics-cert\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982458 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-var-lib-cni-multus\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982475 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-ovn\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982481 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a73d5878-697e-4e13-924c-248fb9150c9e-cni-binary-copy\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982516 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-run-k8s-cni-cncf-io\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982369 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-cnibin\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982537 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a73d5878-697e-4e13-924c-248fb9150c9e-tuning-conf-dir\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982556 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-slash\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982561 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/113e28fe-424e-491f-a50b-29ddf3e19cb8-hosts-file\") pod \"node-resolver-rgpjs\" (UID: \"113e28fe-424e-491f-a50b-29ddf3e19cb8\") " pod="openshift-dns/node-resolver-rgpjs"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982578 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-var-lib-openvswitch\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982581 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-cni-netd\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982601 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-log-socket\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982603 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf92s\" (UniqueName: \"kubernetes.io/projected/a73d5878-697e-4e13-924c-248fb9150c9e-kube-api-access-gf92s\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982612 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-ovnkube-script-lib\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982632 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e679c083-2480-4bc8-a8ea-dc2ff0412508-mcd-auth-proxy-config\") pod \"machine-config-daemon-2x7t7\" (UID: \"e679c083-2480-4bc8-a8ea-dc2ff0412508\") " pod="openshift-machine-config-operator/machine-config-daemon-2x7t7"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982436 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-var-lib-cni-bin\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982651 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4cmd\" (UniqueName: \"kubernetes.io/projected/e679c083-2480-4bc8-a8ea-dc2ff0412508-kube-api-access-s4cmd\") pod \"machine-config-daemon-2x7t7\" (UID: \"e679c083-2480-4bc8-a8ea-dc2ff0412508\") " pod="openshift-machine-config-operator/machine-config-daemon-2x7t7"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982669 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-var-lib-cni-multus\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982670 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/62530621-fff3-49c0-ba0d-14d7ec144c5f-cni-binary-copy\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982694 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-multus-socket-dir-parent\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982712 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a73d5878-697e-4e13-924c-248fb9150c9e-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982731 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-multus-conf-dir\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982739 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-cni-netd\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982753 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/e679c083-2480-4bc8-a8ea-dc2ff0412508-rootfs\") pod \"machine-config-daemon-2x7t7\" (UID: \"e679c083-2480-4bc8-a8ea-dc2ff0412508\") " pod="openshift-machine-config-operator/machine-config-daemon-2x7t7"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982778 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-run-netns\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982788 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/113e28fe-424e-491f-a50b-29ddf3e19cb8-hosts-file\") pod \"node-resolver-rgpjs\" (UID: \"113e28fe-424e-491f-a50b-29ddf3e19cb8\") " pod="openshift-dns/node-resolver-rgpjs"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982835 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-host-run-netns\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982847 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-multus-conf-dir\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982879 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/e679c083-2480-4bc8-a8ea-dc2ff0412508-rootfs\") pod \"machine-config-daemon-2x7t7\" (UID: \"e679c083-2480-4bc8-a8ea-dc2ff0412508\") " pod="openshift-machine-config-operator/machine-config-daemon-2x7t7"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.982920 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/62530621-fff3-49c0-ba0d-14d7ec144c5f-multus-socket-dir-parent\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.983278 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a73d5878-697e-4e13-924c-248fb9150c9e-cni-binary-copy\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.983591 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/62530621-fff3-49c0-ba0d-14d7ec144c5f-multus-daemon-config\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.983605 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e679c083-2480-4bc8-a8ea-dc2ff0412508-mcd-auth-proxy-config\") pod \"machine-config-daemon-2x7t7\" (UID: \"e679c083-2480-4bc8-a8ea-dc2ff0412508\") " pod="openshift-machine-config-operator/machine-config-daemon-2x7t7"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.983622 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a73d5878-697e-4e13-924c-248fb9150c9e-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.985921 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/62530621-fff3-49c0-ba0d-14d7ec144c5f-cni-binary-copy\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.986324 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1a090784-1b4b-4c21-b425-9ea90576fc74-ovn-node-metrics-cert\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.986402 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e679c083-2480-4bc8-a8ea-dc2ff0412508-proxy-tls\") pod \"machine-config-daemon-2x7t7\" (UID: \"e679c083-2480-4bc8-a8ea-dc2ff0412508\") " pod="openshift-machine-config-operator/machine-config-daemon-2x7t7"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.989743 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"071ea1a0-65ea-49d7-a4b1-0f8a312c0112\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1211 10:34:57.908524 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 10:34:57.912494 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2684440780/tls.crt::/tmp/serving-cert-2684440780/tls.key\\\\\\\"\\\\nI1211 10:35:04.252522 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1211 10:35:04.257124 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1211 10:35:04.257226 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1211 10:35:04.257293 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1211 10:35:04.257339 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1211 10:35:04.268814 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1211 10:35:04.268871 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268878 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1211 10:35:04.268889 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1211 10:35:04.268893 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1211 10:35:04.268897 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1211 10:35:04.269301 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1211 10:35:04.273272 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.998738 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4cmd\" (UniqueName: \"kubernetes.io/projected/e679c083-2480-4bc8-a8ea-dc2ff0412508-kube-api-access-s4cmd\") pod \"machine-config-daemon-2x7t7\" (UID: \"e679c083-2480-4bc8-a8ea-dc2ff0412508\") " pod="openshift-machine-config-operator/machine-config-daemon-2x7t7"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.999437 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf92s\" (UniqueName: \"kubernetes.io/projected/a73d5878-697e-4e13-924c-248fb9150c9e-kube-api-access-gf92s\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:11 crc kubenswrapper[5016]: I1211 10:35:11.999574 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hw9x\" (UniqueName: \"kubernetes.io/projected/1a090784-1b4b-4c21-b425-9ea90576fc74-kube-api-access-6hw9x\") pod \"ovnkube-node-7m8vj\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.000429 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckw66\" (UniqueName: \"kubernetes.io/projected/62530621-fff3-49c0-ba0d-14d7ec144c5f-kube-api-access-ckw66\") pod \"multus-k9ssc\" (UID: \"62530621-fff3-49c0-ba0d-14d7ec144c5f\") " pod="openshift-multus/multus-k9ssc"
Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.002673 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.015043 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a73d5878-697e-4e13-924c-248fb9150c9e-tuning-conf-dir\") pod \"multus-additional-cni-plugins-54q52\" (UID: \"a73d5878-697e-4e13-924c-248fb9150c9e\") " pod="openshift-multus/multus-additional-cni-plugins-54q52"
Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.018707 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k9ssc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62530621-fff3-49c0-ba0d-14d7ec144c5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ckw66\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k9ssc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.029634 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"10a14504-cb78-4a73-96dd-5fe8640132aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2d2316a570824689ae8b4652bce6589ad8d06861d7bdfddd68e02452bb9f10d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0cfe1d8ecd790f0d9aae8e53944e730b397e1fd8409e5a3ef5ab590f956596\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.042764 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"071ea1a0-65ea-49d7-a4b1-0f8a312c0112\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11
T10:35:05Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1211 10:34:57.908524 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 10:34:57.912494 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2684440780/tls.crt::/tmp/serving-cert-2684440780/tls.key\\\\\\\"\\\\nI1211 10:35:04.252522 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1211 10:35:04.257124 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1211 10:35:04.257226 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1211 10:35:04.257293 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1211 10:35:04.257339 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1211 10:35:04.268814 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1211 10:35:04.268871 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268878 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1211 10:35:04.268889 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1211 10:35:04.268893 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1211 10:35:04.268897 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1211 10:35:04.269301 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1211 10:35:04.273272 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.054548 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.064811 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.076362 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.078165 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.078306 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.078396 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.078529 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.078613 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:12Z","lastTransitionTime":"2025-12-11T10:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.088211 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.092525 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5t5hc\" (UniqueName: \"kubernetes.io/projected/113e28fe-424e-491f-a50b-29ddf3e19cb8-kube-api-access-5t5hc\") pod \"node-resolver-rgpjs\" (UID: \"113e28fe-424e-491f-a50b-29ddf3e19cb8\") " pod="openshift-dns/node-resolver-rgpjs" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.108886 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5610bfa4-13d2-4186-a9e2-f18f8714d039\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21ac855626db2f18c17bf52510532154bf818297876b48f22f4b9bcb23201038\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d88a904ce0e220e5000a352352484e0aeac0dda15115ca38f6654ede993a6a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a34a2d537d5a93deb3f35d272c93e721690126ff5fcf82f28246a0e582f32ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4915d3fe61301ca89c16f7f8b63cdd7b618d6b2
096efd9834c0ef09396b5da9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67f3620eb07a4726b9869bb3a235b066ca330e4ec8d5a4ae6fe3a5236b70f4d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.124294 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.134887 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e679c083-2480-4bc8-a8ea-dc2ff0412508\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2x7t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.147467 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a73d5878-697e-4e13-924c-248fb9150c9e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceac
count\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-54q52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.163171 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a090784-1b4b-4c21-b425-9ea90576fc74\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7m8vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.171587 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rgpjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"113e28fe-424e-491f-a50b-29ddf3e19cb8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t5hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rgpjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.175870 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.181202 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.181236 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.181245 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.181260 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.181271 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:12Z","lastTransitionTime":"2025-12-11T10:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:12 crc kubenswrapper[5016]: W1211 10:35:12.185790 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode679c083_2480_4bc8_a8ea_dc2ff0412508.slice/crio-fcc20c360fefcd520a5a424dea08341ff6c2c8fc267ed2e331a826b449721164 WatchSource:0}: Error finding container fcc20c360fefcd520a5a424dea08341ff6c2c8fc267ed2e331a826b449721164: Status 404 returned error can't find the container with id fcc20c360fefcd520a5a424dea08341ff6c2c8fc267ed2e331a826b449721164 Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.188156 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-54q52" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.195651 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.204879 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-rgpjs" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.213157 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-k9ssc" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.283306 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.283385 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.283399 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.283422 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.283436 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:12Z","lastTransitionTime":"2025-12-11T10:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.314710 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"fcc20c360fefcd520a5a424dea08341ff6c2c8fc267ed2e331a826b449721164"} Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.315997 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.328849 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.339725 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.348819 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.359357 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10a14504-cb78-4a73-96dd-5fe8640132aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2d2316a570824689ae8b4652bce6589ad8d06861d7bdfddd68e02452bb9f10d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0cfe1d8ecd790f0d9aae8e53944e730b397e1fd8409e5a3ef5ab590f956596\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.373585 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"071ea1a0-65ea-49d7-a4b1-0f8a312c0112\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11
T10:35:05Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1211 10:34:57.908524 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 10:34:57.912494 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2684440780/tls.crt::/tmp/serving-cert-2684440780/tls.key\\\\\\\"\\\\nI1211 10:35:04.252522 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1211 10:35:04.257124 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1211 10:35:04.257226 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1211 10:35:04.257293 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1211 10:35:04.257339 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1211 10:35:04.268814 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1211 10:35:04.268871 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268878 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1211 10:35:04.268889 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1211 10:35:04.268893 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1211 10:35:04.268897 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1211 10:35:04.269301 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1211 10:35:04.273272 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.384008 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a285d2cbeb8195ae60cf34839e1f66992e57742406574b0a00de9d52e8a4f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:35:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.385604 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.385653 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.385666 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.385683 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.385695 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:12Z","lastTransitionTime":"2025-12-11T10:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.393303 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e679c083-2480-4bc8-a8ea-dc2ff0412508\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2x7t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.404178 5016 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-54q52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a73d5878-697e-4e13-924c-248fb9150c9e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\
\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-54q52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.420604 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a090784-1b4b-4c21-b425-9ea90576fc74\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7m8vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.436912 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5610bfa4-13d2-4186-a9e2-f18f8714d039\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21ac855626db2f18c17bf52510532154bf818297876b48f22f4b9bcb23201038\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d88a904ce0e220e5000a352352484e0aeac0dda15115ca38f6654ede993a6a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a34a2d537d5a93deb3f35d272c93e721690126ff5fcf82f28246a0e582f32ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4915d3fe61301ca89c16f7f8b63cdd7b618d6b2
096efd9834c0ef09396b5da9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67f3620eb07a4726b9869bb3a235b066ca330e4ec8d5a4ae6fe3a5236b70f4d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.447548 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.454931 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rgpjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"113e28fe-424e-491f-a50b-29ddf3e19cb8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t5hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rgpjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.464690 5016 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.473879 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.473928 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:12 crc kubenswrapper[5016]: E1211 10:35:12.474050 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:12 crc kubenswrapper[5016]: E1211 10:35:12.474179 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.475150 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k9ssc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62530621-fff3-49c0-ba0d-14d7ec144c5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ckw66\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\
\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k9ssc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.482784 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rgpjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"113e28fe-424e-491f-a50b-29ddf3e19cb8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t5hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rgpjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.487921 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.487990 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.488002 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 
10:35:12.488019 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.488032 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:12Z","lastTransitionTime":"2025-12-11T10:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.491436 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.500836 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k9ssc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62530621-fff3-49c0-ba0d-14d7ec144c5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ckw66\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k9ssc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.511348 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"10a14504-cb78-4a73-96dd-5fe8640132aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2d2316a570824689ae8b4652bce6589ad8d06861d7bdfddd68e02452bb9f10d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0cfe1d8ecd790f0d9aae8e53944e730b397e1fd8409e5a3ef5ab590f956596\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.522071 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"071ea1a0-65ea-49d7-a4b1-0f8a312c0112\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31d86d5f7654802bd05a4a9c4d506deb5b9bcf5fd1ae02722093170105a93409\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1211 10:34:57.908524 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 10:34:57.912494 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2684440780/tls.crt::/tmp/serving-cert-2684440780/tls.key\\\\\\\"\\\\nI1211 10:35:04.252522 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1211 10:35:04.257124 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1211 10:35:04.257226 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1211 10:35:04.257293 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1211 10:35:04.257339 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1211 10:35:04.268814 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1211 10:35:04.268871 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268878 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1211 10:35:04.268889 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1211 10:35:04.268893 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1211 10:35:04.268897 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1211 10:35:04.269301 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1211 10:35:04.273272 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:35:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.533627 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a285d2cbeb8195ae60cf34839e1f66992e57742406574b0a00de9d52e8a4f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:35:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.542599 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.553793 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.562530 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.577779 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5610bfa4-13d2-4186-a9e2-f18f8714d039\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21ac855626db2f18c17bf52510532154bf818297876b48f22f4b9bcb23201038\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d88a904ce0e220e5000a352352484e0aeac0dda15115ca38f6654ede993a6a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runn
ing\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a34a2d537d5a93deb3f35d272c93e721690126ff5fcf82f28246a0e582f32ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4915d3fe61301ca89c16f7f8b63cdd7b618d6b2096efd9834c0ef09396b5da9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67f3620eb07a4726b9869bb3a235b066ca330e4ec8d5a4ae6fe3a5236b70f4d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c4
8500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.588505 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.589642 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.589692 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.589704 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.589720 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.589732 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:12Z","lastTransitionTime":"2025-12-11T10:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.597640 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e679c083-2480-4bc8-a8ea-dc2ff0412508\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2x7t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.609759 5016 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-54q52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a73d5878-697e-4e13-924c-248fb9150c9e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\
\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-54q52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.623665 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a090784-1b4b-4c21-b425-9ea90576fc74\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7m8vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.691905 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.691930 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.691992 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.692008 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.692019 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:12Z","lastTransitionTime":"2025-12-11T10:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.794188 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.794226 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.794237 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.794253 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.794263 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:12Z","lastTransitionTime":"2025-12-11T10:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.896266 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.896308 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.896319 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.896334 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.896345 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:12Z","lastTransitionTime":"2025-12-11T10:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.990582 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.990721 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:12 crc kubenswrapper[5016]: E1211 10:35:12.990777 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:20.990742273 +0000 UTC m=+37.809301852 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:12 crc kubenswrapper[5016]: E1211 10:35:12.990806 5016 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.990827 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:12 crc kubenswrapper[5016]: E1211 10:35:12.990865 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:20.990851116 +0000 UTC m=+37.809410785 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 10:35:12 crc kubenswrapper[5016]: E1211 10:35:12.991004 5016 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 10:35:12 crc kubenswrapper[5016]: E1211 10:35:12.991093 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:20.991051721 +0000 UTC m=+37.809611390 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.999119 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.999164 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.999175 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.999192 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:12 crc kubenswrapper[5016]: I1211 10:35:12.999202 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:12Z","lastTransitionTime":"2025-12-11T10:35:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.091710 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.091750 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:13 crc kubenswrapper[5016]: E1211 10:35:13.091870 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 10:35:13 crc kubenswrapper[5016]: E1211 10:35:13.091886 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 10:35:13 crc kubenswrapper[5016]: E1211 10:35:13.091896 5016 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:13 crc kubenswrapper[5016]: E1211 10:35:13.091926 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 10:35:13 crc kubenswrapper[5016]: E1211 10:35:13.091977 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 10:35:13 crc kubenswrapper[5016]: E1211 10:35:13.091992 5016 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:13 crc kubenswrapper[5016]: E1211 10:35:13.091965 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:21.091932393 +0000 UTC m=+37.910491972 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:13 crc kubenswrapper[5016]: E1211 10:35:13.092119 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:21.092092687 +0000 UTC m=+37.910652276 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.105198 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.105252 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.105278 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.105300 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.105316 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:13Z","lastTransitionTime":"2025-12-11T10:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.207481 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.207523 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.207535 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.207551 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.207564 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:13Z","lastTransitionTime":"2025-12-11T10:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.309546 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.309574 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.309583 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.309596 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.309605 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:13Z","lastTransitionTime":"2025-12-11T10:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.411916 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.411978 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.411990 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.412005 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.412018 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:13Z","lastTransitionTime":"2025-12-11T10:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.474492 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:13 crc kubenswrapper[5016]: E1211 10:35:13.474622 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.490529 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5610bfa4-13d2-4186-a9e2-f18f8714d039\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21ac855626db2f18c17bf52510532154bf818297876b48f22f4b9bcb23201038\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d88a904ce0e220e5000a352352484e0aeac0dda15115ca38f6654ede993a6a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a34a2d537d5a93deb3f35d272c93e721690126ff5fcf82f28246a0e582f32ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4915d3fe61301ca89c16f7f8b63cdd7b618d6b2096efd9834c0ef09396b5da9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67f3620eb07a4726b9869bb3a235b066ca330e4ec8d5a4ae6fe3a5236b70f4d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b65596
50b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.507243 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.513826 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.513861 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.513872 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.513889 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.513901 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:13Z","lastTransitionTime":"2025-12-11T10:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.524093 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e679c083-2480-4bc8-a8ea-dc2ff0412508\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2x7t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.545336 5016 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-54q52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a73d5878-697e-4e13-924c-248fb9150c9e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\
\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-54q52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.569386 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a090784-1b4b-4c21-b425-9ea90576fc74\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7m8vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.576833 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rgpjs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"113e28fe-424e-491f-a50b-29ddf3e19cb8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t5hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rgpjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.584696 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.594484 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k9ssc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62530621-fff3-49c0-ba0d-14d7ec144c5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ckw66\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k9ssc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.603371 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"10a14504-cb78-4a73-96dd-5fe8640132aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2d2316a570824689ae8b4652bce6589ad8d06861d7bdfddd68e02452bb9f10d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0cfe1d8ecd790f0d9aae8e53944e730b397e1fd8409e5a3ef5ab590f956596\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.614093 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"071ea1a0-65ea-49d7-a4b1-0f8a312c0112\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31d86d5f7654802bd05a4a9c4d506deb5b9bcf5fd1ae02722093170105a93409\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1211 10:34:57.908524 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 10:34:57.912494 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2684440780/tls.crt::/tmp/serving-cert-2684440780/tls.key\\\\\\\"\\\\nI1211 10:35:04.252522 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1211 10:35:04.257124 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1211 10:35:04.257226 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1211 10:35:04.257293 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1211 10:35:04.257339 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1211 10:35:04.268814 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1211 10:35:04.268871 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268878 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1211 10:35:04.268889 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1211 10:35:04.268893 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1211 10:35:04.268897 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1211 10:35:04.269301 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1211 10:35:04.273272 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:35:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.615720 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.615751 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.615760 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.615774 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.615783 5016 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:13Z","lastTransitionTime":"2025-12-11T10:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.624250 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a285d2cbeb8195ae60cf34839e1f66992e57742406574b0a00de9d52e8a4f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:35:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.633423 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.643471 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.653342 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.718355 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.718393 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.718404 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.718419 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.718430 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:13Z","lastTransitionTime":"2025-12-11T10:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.820853 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.820897 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.820910 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.820925 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.820956 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:13Z","lastTransitionTime":"2025-12-11T10:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.921361 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-2696g"] Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.921757 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-2696g" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.922336 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.922364 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.922375 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.922389 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.922399 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:13Z","lastTransitionTime":"2025-12-11T10:35:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.923537 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.923549 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.924580 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.924686 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.935363 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"10a14504-cb78-4a73-96dd-5fe8640132aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2d2316a570824689ae8b4652bce6589ad8d06861d7bdfddd68e02452bb9f10d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0cfe1d8ecd790f0d9aae8e53944e730b397e1fd8409e5a3ef5ab590f956596\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.946682 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"071ea1a0-65ea-49d7-a4b1-0f8a312c0112\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31d86d5f7654802bd05a4a9c4d506deb5b9bcf5fd1ae02722093170105a93409\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1211 10:34:57.908524 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 10:34:57.912494 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2684440780/tls.crt::/tmp/serving-cert-2684440780/tls.key\\\\\\\"\\\\nI1211 10:35:04.252522 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1211 10:35:04.257124 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1211 10:35:04.257226 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1211 10:35:04.257293 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1211 10:35:04.257339 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1211 10:35:04.268814 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1211 10:35:04.268871 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268878 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1211 10:35:04.268889 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1211 10:35:04.268893 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1211 10:35:04.268897 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1211 10:35:04.269301 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1211 10:35:04.273272 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:35:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.956629 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a285d2cbeb8195ae60cf34839e1f66992e57742406574b0a00de9d52e8a4f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:35:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.966531 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.975854 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.987910 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.999865 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtx8x\" (UniqueName: \"kubernetes.io/projected/6ca7f819-150e-4755-9037-f3f192f4b42d-kube-api-access-qtx8x\") pod \"node-ca-2696g\" (UID: \"6ca7f819-150e-4755-9037-f3f192f4b42d\") " pod="openshift-image-registry/node-ca-2696g" Dec 11 10:35:13 crc kubenswrapper[5016]: I1211 10:35:13.999911 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/6ca7f819-150e-4755-9037-f3f192f4b42d-serviceca\") pod \"node-ca-2696g\" (UID: \"6ca7f819-150e-4755-9037-f3f192f4b42d\") " pod="openshift-image-registry/node-ca-2696g" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:13.999933 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6ca7f819-150e-4755-9037-f3f192f4b42d-host\") pod \"node-ca-2696g\" (UID: \"6ca7f819-150e-4755-9037-f3f192f4b42d\") " pod="openshift-image-registry/node-ca-2696g" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.003576 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5610bfa4-13d2-4186-a9e2-f18f8714d039\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21ac855626db2f18c17bf52510532154bf818297876b48f22f4b9bcb23201038\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d88a904ce0e220e5000a352352484e0aeac0dda15115ca38f6654ede993a6a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a34a2d537d5a93deb3f35d272c93e721690126ff5fcf82f28246a0e582f32ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4915d3fe61301ca89c16f7f8b63cdd7b618d6b2
096efd9834c0ef09396b5da9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67f3620eb07a4726b9869bb3a235b066ca330e4ec8d5a4ae6fe3a5236b70f4d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.012878 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.020400 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e679c083-2480-4bc8-a8ea-dc2ff0412508\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2x7t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.024139 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.024171 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.024180 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.024193 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.024203 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:14Z","lastTransitionTime":"2025-12-11T10:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.031060 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a73d5878-697e-4e13-924c-248fb9150c9e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-54q52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.045796 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a090784-1b4b-4c21-b425-9ea90576fc74\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7m8vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.053574 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rgpjs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"113e28fe-424e-491f-a50b-29ddf3e19cb8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t5hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rgpjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.059719 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-2696g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ca7f819-150e-4755-9037-f3f192f4b42d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:13Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qtx8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:13Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-2696g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.068787 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.077913 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k9ssc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62530621-fff3-49c0-ba0d-14d7ec144c5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ckw66\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k9ssc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.100482 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/6ca7f819-150e-4755-9037-f3f192f4b42d-serviceca\") pod \"node-ca-2696g\" (UID: \"6ca7f819-150e-4755-9037-f3f192f4b42d\") " pod="openshift-image-registry/node-ca-2696g" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.100515 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtx8x\" (UniqueName: \"kubernetes.io/projected/6ca7f819-150e-4755-9037-f3f192f4b42d-kube-api-access-qtx8x\") pod \"node-ca-2696g\" (UID: \"6ca7f819-150e-4755-9037-f3f192f4b42d\") " pod="openshift-image-registry/node-ca-2696g" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.100532 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" 
(UniqueName: \"kubernetes.io/host-path/6ca7f819-150e-4755-9037-f3f192f4b42d-host\") pod \"node-ca-2696g\" (UID: \"6ca7f819-150e-4755-9037-f3f192f4b42d\") " pod="openshift-image-registry/node-ca-2696g" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.100596 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6ca7f819-150e-4755-9037-f3f192f4b42d-host\") pod \"node-ca-2696g\" (UID: \"6ca7f819-150e-4755-9037-f3f192f4b42d\") " pod="openshift-image-registry/node-ca-2696g" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.101883 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/6ca7f819-150e-4755-9037-f3f192f4b42d-serviceca\") pod \"node-ca-2696g\" (UID: \"6ca7f819-150e-4755-9037-f3f192f4b42d\") " pod="openshift-image-registry/node-ca-2696g" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.115339 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtx8x\" (UniqueName: \"kubernetes.io/projected/6ca7f819-150e-4755-9037-f3f192f4b42d-kube-api-access-qtx8x\") pod \"node-ca-2696g\" (UID: \"6ca7f819-150e-4755-9037-f3f192f4b42d\") " pod="openshift-image-registry/node-ca-2696g" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.126313 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.126342 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.126350 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.126362 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.126371 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:14Z","lastTransitionTime":"2025-12-11T10:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.228582 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.228631 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.228646 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.228675 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.228693 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:14Z","lastTransitionTime":"2025-12-11T10:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.233411 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-2696g" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.253546 5016 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Dec 11 10:35:14 crc kubenswrapper[5016]: W1211 10:35:14.254329 5016 reflector.go:484] object-"openshift-image-registry"/"node-ca-dockercfg-4777p": watch of *v1.Secret ended with: very short watch: object-"openshift-image-registry"/"node-ca-dockercfg-4777p": Unexpected watch close - watch lasted less than a second and no items received Dec 11 10:35:14 crc kubenswrapper[5016]: W1211 10:35:14.254340 5016 reflector.go:484] object-"openshift-image-registry"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Dec 11 10:35:14 crc kubenswrapper[5016]: W1211 10:35:14.254907 5016 reflector.go:484] object-"openshift-image-registry"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Dec 11 10:35:14 crc kubenswrapper[5016]: W1211 10:35:14.255701 5016 reflector.go:484] object-"openshift-image-registry"/"image-registry-certificates": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"image-registry-certificates": Unexpected watch close - watch lasted less than a second and no items received Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.331017 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.331082 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.331091 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.331104 5016 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.331114 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:14Z","lastTransitionTime":"2025-12-11T10:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.384744 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.384822 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.384832 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.384846 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.384855 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:14Z","lastTransitionTime":"2025-12-11T10:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:14 crc kubenswrapper[5016]: E1211 10:35:14.395056 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a4f30830-8bfd-48d5-bda8-8a5b9692bf6a\\\",\\\"systemUUID\\\":\\\"e1044399-4f18-4ebb-9d7a-d6302ff4a7fe\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.398696 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.398736 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.398744 5016 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.398758 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.398768 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:14Z","lastTransitionTime":"2025-12-11T10:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:14 crc kubenswrapper[5016]: E1211 10:35:14.408923 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a4f30830-8bfd-48d5-bda8-8a5b9692bf6a\\\",\\\"systemUUID\\\":\\\"e1044399-4f18-4ebb-9d7a-d6302ff4a7fe\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.411789 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.411830 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.411840 5016 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.411855 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.411865 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:14Z","lastTransitionTime":"2025-12-11T10:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:14 crc kubenswrapper[5016]: W1211 10:35:14.421120 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda73d5878_697e_4e13_924c_248fb9150c9e.slice/crio-fee5b19122df9b2df8e07f1c8ceee0e72c5d2e7b39f9a01fafbd7e65dedf8bca WatchSource:0}: Error finding container fee5b19122df9b2df8e07f1c8ceee0e72c5d2e7b39f9a01fafbd7e65dedf8bca: Status 404 returned error can't find the container with id fee5b19122df9b2df8e07f1c8ceee0e72c5d2e7b39f9a01fafbd7e65dedf8bca Dec 11 10:35:14 crc kubenswrapper[5016]: E1211 10:35:14.423703 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{…}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.428297 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.428411 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.428496 5016
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.428572 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.428630 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:14Z","lastTransitionTime":"2025-12-11T10:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:14 crc kubenswrapper[5016]: E1211 10:35:14.439549 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{…}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: W1211 10:35:14.442723 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ca7f819_150e_4755_9037_f3f192f4b42d.slice/crio-95306ffdfdf920b0bf511194ac53d90c47316fb074f9228c63a2b1cadad48da8 WatchSource:0}: Error finding container 95306ffdfdf920b0bf511194ac53d90c47316fb074f9228c63a2b1cadad48da8: Status 404 returned
error can't find the container with id 95306ffdfdf920b0bf511194ac53d90c47316fb074f9228c63a2b1cadad48da8 Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.444125 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.444156 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.444165 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.444181 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.444193 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:14Z","lastTransitionTime":"2025-12-11T10:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:14 crc kubenswrapper[5016]: E1211 10:35:14.460046 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{…}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 11 10:35:14 crc kubenswrapper[5016]: E1211 10:35:14.460163 5016 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.461681 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.461711 5016
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.461724 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.461742 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.461754 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:14Z","lastTransitionTime":"2025-12-11T10:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.474101 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.474108 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:14 crc kubenswrapper[5016]: E1211 10:35:14.474268 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:14 crc kubenswrapper[5016]: E1211 10:35:14.474343 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.568853 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.568889 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.568898 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.568912 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.568921 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:14Z","lastTransitionTime":"2025-12-11T10:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.670901 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.670951 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.670963 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.670978 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 11 10:35:14 crc kubenswrapper[5016]: I1211 10:35:14.670989 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:14Z","lastTransitionTime":"2025-12-11T10:35:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 11 10:35:15 crc kubenswrapper[5016]: I1211 10:35:15.131844 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Dec 11 10:35:15 crc kubenswrapper[5016]: I1211 10:35:15.306496 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Dec 11 10:35:15 crc kubenswrapper[5016]: I1211 10:35:15.320266 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" event={"ID":"a73d5878-697e-4e13-924c-248fb9150c9e","Type":"ContainerStarted","Data":"fee5b19122df9b2df8e07f1c8ceee0e72c5d2e7b39f9a01fafbd7e65dedf8bca"}
Dec 11 10:35:15 crc kubenswrapper[5016]: I1211 10:35:15.321148 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerStarted","Data":"be4c2942be4019e7feb3d1bdce1b71545316d18531027f8142994eded60efb0a"}
Dec 11 10:35:15 crc kubenswrapper[5016]: I1211 10:35:15.321921 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-2696g" event={"ID":"6ca7f819-150e-4755-9037-f3f192f4b42d","Type":"ContainerStarted","Data":"95306ffdfdf920b0bf511194ac53d90c47316fb074f9228c63a2b1cadad48da8"}
Dec 11 10:35:15 crc kubenswrapper[5016]: I1211 10:35:15.322671 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-rgpjs" event={"ID":"113e28fe-424e-491f-a50b-29ddf3e19cb8","Type":"ContainerStarted","Data":"4945632ffefba1f8369629a1b8ed3f32bae3f830dbf909a16ce8106ec6a6db4c"}
Dec 11 10:35:15 crc kubenswrapper[5016]: I1211 10:35:15.474138 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:35:15 crc kubenswrapper[5016]: E1211 10:35:15.474324 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 10:35:15 crc kubenswrapper[5016]: I1211 10:35:15.503545 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Dec 11 10:35:15 crc kubenswrapper[5016]: I1211 10:35:15.603422 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Dec 11 10:35:16 crc kubenswrapper[5016]: I1211 10:35:16.327594 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-k9ssc" event={"ID":"62530621-fff3-49c0-ba0d-14d7ec144c5f","Type":"ContainerStarted","Data":"59a73738842c1797343aa10ad817ab610f63bee3f401f93a272a94cfd822d25a"}
Dec 11 10:35:16 crc kubenswrapper[5016]: I1211 10:35:16.473656 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 10:35:16 crc kubenswrapper[5016]: I1211 10:35:16.473703 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:35:16 crc kubenswrapper[5016]: E1211 10:35:16.473804 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 11 10:35:16 crc kubenswrapper[5016]: E1211 10:35:16.473898 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 11 10:35:17 crc kubenswrapper[5016]: I1211 10:35:17.474009 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:35:17 crc kubenswrapper[5016]: E1211 10:35:17.474172 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.338140 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"c4fe6012132bf0a523ec1352b1daf4f8c517c373d7797ad2a4048d816aafa66a"}
Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.340073 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-2696g" event={"ID":"6ca7f819-150e-4755-9037-f3f192f4b42d","Type":"ContainerStarted","Data":"e044da6ea4d00917f8af5188ffa424f7b5f21c3875af0367bd56b1955899553e"}
Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.342095 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-rgpjs" event={"ID":"113e28fe-424e-491f-a50b-29ddf3e19cb8","Type":"ContainerStarted","Data":"f1f56daca4ef2de4d4189224003c2f54fe69b8f01c81b031148be1c0713e9b64"}
Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.343719 5016 generic.go:334] "Generic (PLEG): container finished" podID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerID="5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae" exitCode=0
Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.343774 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerDied","Data":"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae"}
Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.345354 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-k9ssc" event={"ID":"62530621-fff3-49c0-ba0d-14d7ec144c5f","Type":"ContainerStarted","Data":"5ef674cb488ca21a343bb2740538b28a8ef41adf15b2fcae0eac5a1f9439e210"}
Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.346929 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"b7028fe427b7682d3e5b7f2a5e7fedee9c12ebb5f609d4c361ba3d5fed28bee0"}
Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.348528 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" event={"ID":"a73d5878-697e-4e13-924c-248fb9150c9e","Type":"ContainerStarted","Data":"c1fa0f2d990b0bfa38220c15b5463957a02513e021d1d7c52968e85d35e060ff"}
Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.350184 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"e75dc69f6bb03674b279b85fb2414d5f284bcb66664c2bfdcf7b05d4b8f9a450"}
Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.363923 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rgpjs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"113e28fe-424e-491f-a50b-29ddf3e19cb8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t5hc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rgpjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.376838 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-2696g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ca7f819-150e-4755-9037-f3f192f4b42d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:13Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:13Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qtx8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:13Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-2696g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.386989 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.387021 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.387069 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.387083 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.387093 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:18Z","lastTransitionTime":"2025-12-11T10:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.392378 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.405771 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-k9ssc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62530621-fff3-49c0-ba0d-14d7ec144c5f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ckw66\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-k9ssc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.423613 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.438771 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e75dc69f6bb03674b279b85fb2414d5f284bcb66664c2bfdcf7b05d4b8f9a450\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.455022 5016 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"10a14504-cb78-4a73-96dd-5fe8640132aa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2d2316a570824689ae8b4652bce6589ad8d06861d7bdfddd68e02452bb9f10d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0cfe1d8ecd790f0d9aae8e53944e730b397e1fd8409e5a3ef5ab590f956596\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5
c5c8d2a6562dc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.470870 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"071ea1a0-65ea-49d7-a4b1-0f8a312c0112\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31d86d5f7654802bd05a4a9c4d506deb5b9bcf5fd1ae02722093170105a93409\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1211 10:34:57.908524 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1211 10:34:57.912494 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2684440780/tls.crt::/tmp/serving-cert-2684440780/tls.key\\\\\\\"\\\\nI1211 10:35:04.252522 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1211 10:35:04.257124 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1211 10:35:04.257226 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1211 10:35:04.257293 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1211 10:35:04.257339 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1211 10:35:04.268814 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1211 10:35:04.268871 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268878 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1211 10:35:04.268885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1211 10:35:04.268889 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1211 10:35:04.268893 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1211 10:35:04.268897 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1211 10:35:04.269301 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1211 10:35:04.273272 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:35:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.473658 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.473663 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:18 crc kubenswrapper[5016]: E1211 10:35:18.473916 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:18 crc kubenswrapper[5016]: E1211 10:35:18.474059 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.484399 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a285d2cbeb8195ae60cf34839e1f66992e57742406574b0a00de9d52e8a4f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:35:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.489411 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.489458 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.489467 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.489487 5016 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.489497 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:18Z","lastTransitionTime":"2025-12-11T10:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.497975 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.514024 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a73d5878-697e-4e13-924c-248fb9150c9e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-54q52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.541548 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a090784-1b4b-4c21-b425-9ea90576fc74\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7m8vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.568465 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5610bfa4-13d2-4186-a9e2-f18f8714d039\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21ac855626db2f18c17bf52510532154bf818297876b48f22f4b9bcb23201038\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d88a904ce0e220e5000a352352484e0aeac0dda15115ca38f6654ede993a6a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T
10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a34a2d537d5a93deb3f35d272c93e721690126ff5fcf82f28246a0e582f32ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4915d3fe61301ca89c16f7f8b63cdd7b618d6b2096efd9834c0ef09396b5da9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67f3620eb07a4726b9869bb3a235b066ca330e4ec8d5a4ae6fe3a5236b70f4d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\
",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.587739 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:05Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.591311 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.591400 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.591413 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.591438 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.591456 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:18Z","lastTransitionTime":"2025-12-11T10:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.605837 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e679c083-2480-4bc8-a8ea-dc2ff0412508\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s4cmd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2x7t7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:18Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:18 crc kubenswrapper[5016]: 
I1211 10:35:18.695188 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.695764 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.695779 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.695799 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.695811 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:18Z","lastTransitionTime":"2025-12-11T10:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.798881 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.798921 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.798931 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.798968 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.798979 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:18Z","lastTransitionTime":"2025-12-11T10:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.901095 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.901129 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.901137 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.901149 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:18 crc kubenswrapper[5016]: I1211 10:35:18.901159 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:18Z","lastTransitionTime":"2025-12-11T10:35:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.003996 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.004285 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.004294 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.004308 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.004317 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:19Z","lastTransitionTime":"2025-12-11T10:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.106468 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.106506 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.106517 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.106534 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.106545 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:19Z","lastTransitionTime":"2025-12-11T10:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.213827 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.213855 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.213867 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.213884 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.213896 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:19Z","lastTransitionTime":"2025-12-11T10:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.316598 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.316640 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.316650 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.316665 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.316676 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:19Z","lastTransitionTime":"2025-12-11T10:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.359236 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"bf41fa53a21b70a77ae9a34257973f61da6b1d7767f3a93c1d1cbb19f846b554"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.360629 5016 generic.go:334] "Generic (PLEG): container finished" podID="a73d5878-697e-4e13-924c-248fb9150c9e" containerID="c1fa0f2d990b0bfa38220c15b5463957a02513e021d1d7c52968e85d35e060ff" exitCode=0 Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.360679 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" event={"ID":"a73d5878-697e-4e13-924c-248fb9150c9e","Type":"ContainerDied","Data":"c1fa0f2d990b0bfa38220c15b5463957a02513e021d1d7c52968e85d35e060ff"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.394016 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a73d5878-697e-4e13-924c-248fb9150c9e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gf92s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-54q52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:19Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.415761 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a090784-1b4b-4c21-b425-9ea90576fc74\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:11Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volume
Mounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/opensh
ift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6hw9x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:35:11Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7m8vj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:19Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.419240 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.419267 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.419275 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.419289 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.419298 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:19Z","lastTransitionTime":"2025-12-11T10:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.438016 5016 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5610bfa4-13d2-4186-a9e2-f18f8714d039\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:35:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-11T10:34:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21ac855626db2f18c17bf52510532154bf818297876b48f22f4b9bcb23201038\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d88a904ce0e220e5000a352352484e0aeac0dda15115ca38f6654ede993a6a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a34a2d537d5a93deb3f35d272c93e721690126ff5fcf82f28246a0e582f32ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4915d3fe61301ca89c16f7f8b63cdd7b618d6b2096efd9834c0ef09396b5da9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67f3620eb07a4726b9869bb3a235b066ca330e4ec8d5a4ae6fe3a5236b70f4d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-11T10:34:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://287c70e3339881583c2cf47c48500688cc8183d062e02cb3bdc346856b2ba8c0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://537304295f5dfda6e6a6a5b6559650b04caf272d41588df69a9f4d6c006cd9fc\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:45Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb5685d98b1785fb2fb409e2d21866bb758a66431a531194d5a88a031a1156f5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-11T10:34:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-11T10:34:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-11T10:34:43Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-11T10:35:19Z is after 2025-08-24T17:21:41Z" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.474305 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:19 crc kubenswrapper[5016]: E1211 10:35:19.474444 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.513570 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podStartSLOduration=10.513553182 podStartE2EDuration="10.513553182s" podCreationTimestamp="2025-12-11 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:19.491418668 +0000 UTC m=+36.309978267" watchObservedRunningTime="2025-12-11 10:35:19.513553182 +0000 UTC m=+36.332112761" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.523202 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.523237 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.523246 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.523260 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.523269 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:19Z","lastTransitionTime":"2025-12-11T10:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.599896 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=15.599877973 podStartE2EDuration="15.599877973s" podCreationTimestamp="2025-12-11 10:35:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:19.599721859 +0000 UTC m=+36.418281458" watchObservedRunningTime="2025-12-11 10:35:19.599877973 +0000 UTC m=+36.418437552" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.624272 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=15.62425493 podStartE2EDuration="15.62425493s" podCreationTimestamp="2025-12-11 10:35:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:19.624124327 +0000 UTC m=+36.442683916" watchObservedRunningTime="2025-12-11 10:35:19.62425493 +0000 UTC m=+36.442814509" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.625459 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.625504 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.625517 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.625534 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.625545 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:19Z","lastTransitionTime":"2025-12-11T10:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.670314 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-k9ssc" podStartSLOduration=10.670294179999999 podStartE2EDuration="10.67029418s" podCreationTimestamp="2025-12-11 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:19.669669425 +0000 UTC m=+36.488229014" watchObservedRunningTime="2025-12-11 10:35:19.67029418 +0000 UTC m=+36.488853759" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.728396 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.728441 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.728452 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.728470 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.728484 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:19Z","lastTransitionTime":"2025-12-11T10:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.742248 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=13.742228454 podStartE2EDuration="13.742228454s" podCreationTimestamp="2025-12-11 10:35:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:19.740636806 +0000 UTC m=+36.559196415" watchObservedRunningTime="2025-12-11 10:35:19.742228454 +0000 UTC m=+36.560788053" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.756493 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-rgpjs" podStartSLOduration=10.756470927 podStartE2EDuration="10.756470927s" podCreationTimestamp="2025-12-11 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:19.755236168 +0000 UTC m=+36.573795757" watchObservedRunningTime="2025-12-11 10:35:19.756470927 +0000 UTC m=+36.575030506" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.778579 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-2696g" podStartSLOduration=10.77855632 podStartE2EDuration="10.77855632s" podCreationTimestamp="2025-12-11 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:19.77812988 +0000 UTC m=+36.596689469" watchObservedRunningTime="2025-12-11 10:35:19.77855632 +0000 UTC m=+36.597115899" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.795352 5016 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz"] Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.795753 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.798046 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.798808 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.827822 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-v2qvr"] Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.831112 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:19 crc kubenswrapper[5016]: E1211 10:35:19.831213 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-v2qvr" podUID="d29d8609-2309-45b2-abc7-b4e10ae27eeb" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.844887 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.844920 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.844928 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.844977 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.844986 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:19Z","lastTransitionTime":"2025-12-11T10:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.854677 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs\") pod \"network-metrics-daemon-v2qvr\" (UID: \"d29d8609-2309-45b2-abc7-b4e10ae27eeb\") " pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.854728 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ff262272-63f6-4657-bdee-b10c2e65e478-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2wrfz\" (UID: \"ff262272-63f6-4657-bdee-b10c2e65e478\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.854750 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dztb4\" (UniqueName: \"kubernetes.io/projected/d29d8609-2309-45b2-abc7-b4e10ae27eeb-kube-api-access-dztb4\") pod \"network-metrics-daemon-v2qvr\" (UID: \"d29d8609-2309-45b2-abc7-b4e10ae27eeb\") " pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.854770 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ff262272-63f6-4657-bdee-b10c2e65e478-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2wrfz\" (UID: \"ff262272-63f6-4657-bdee-b10c2e65e478\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.854851 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngsvg\" (UniqueName: \"kubernetes.io/projected/ff262272-63f6-4657-bdee-b10c2e65e478-kube-api-access-ngsvg\") pod \"ovnkube-control-plane-749d76644c-2wrfz\" (UID: \"ff262272-63f6-4657-bdee-b10c2e65e478\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.854900 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ff262272-63f6-4657-bdee-b10c2e65e478-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2wrfz\" (UID: \"ff262272-63f6-4657-bdee-b10c2e65e478\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.947479 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.947508 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.947516 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.947529 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.947537 5016 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:19Z","lastTransitionTime":"2025-12-11T10:35:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.956285 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngsvg\" (UniqueName: \"kubernetes.io/projected/ff262272-63f6-4657-bdee-b10c2e65e478-kube-api-access-ngsvg\") pod \"ovnkube-control-plane-749d76644c-2wrfz\" (UID: \"ff262272-63f6-4657-bdee-b10c2e65e478\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.956360 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ff262272-63f6-4657-bdee-b10c2e65e478-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2wrfz\" (UID: \"ff262272-63f6-4657-bdee-b10c2e65e478\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.956411 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs\") pod \"network-metrics-daemon-v2qvr\" (UID: \"d29d8609-2309-45b2-abc7-b4e10ae27eeb\") " pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.956436 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ff262272-63f6-4657-bdee-b10c2e65e478-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2wrfz\" (UID: \"ff262272-63f6-4657-bdee-b10c2e65e478\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: E1211 10:35:19.956568 5016 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 10:35:19 crc kubenswrapper[5016]: E1211 10:35:19.956626 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs podName:d29d8609-2309-45b2-abc7-b4e10ae27eeb nodeName:}" failed. No retries permitted until 2025-12-11 10:35:20.456606521 +0000 UTC m=+37.275166100 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs") pod "network-metrics-daemon-v2qvr" (UID: "d29d8609-2309-45b2-abc7-b4e10ae27eeb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.956909 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dztb4\" (UniqueName: \"kubernetes.io/projected/d29d8609-2309-45b2-abc7-b4e10ae27eeb-kube-api-access-dztb4\") pod \"network-metrics-daemon-v2qvr\" (UID: \"d29d8609-2309-45b2-abc7-b4e10ae27eeb\") " pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.957154 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ff262272-63f6-4657-bdee-b10c2e65e478-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2wrfz\" (UID: \"ff262272-63f6-4657-bdee-b10c2e65e478\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.957866 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ff262272-63f6-4657-bdee-b10c2e65e478-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2wrfz\" (UID: \"ff262272-63f6-4657-bdee-b10c2e65e478\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.958011 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ff262272-63f6-4657-bdee-b10c2e65e478-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2wrfz\" (UID: \"ff262272-63f6-4657-bdee-b10c2e65e478\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.961166 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ff262272-63f6-4657-bdee-b10c2e65e478-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2wrfz\" (UID: \"ff262272-63f6-4657-bdee-b10c2e65e478\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.977354 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngsvg\" (UniqueName: \"kubernetes.io/projected/ff262272-63f6-4657-bdee-b10c2e65e478-kube-api-access-ngsvg\") pod \"ovnkube-control-plane-749d76644c-2wrfz\" (UID: \"ff262272-63f6-4657-bdee-b10c2e65e478\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:19 crc kubenswrapper[5016]: I1211 10:35:19.977567 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dztb4\" (UniqueName: \"kubernetes.io/projected/d29d8609-2309-45b2-abc7-b4e10ae27eeb-kube-api-access-dztb4\") pod \"network-metrics-daemon-v2qvr\" (UID: \"d29d8609-2309-45b2-abc7-b4e10ae27eeb\") " pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.049981 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.050019 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.050033 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.050050 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.050060 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:20Z","lastTransitionTime":"2025-12-11T10:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.152179 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.152223 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.152236 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.152251 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.152261 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:20Z","lastTransitionTime":"2025-12-11T10:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.162430 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" Dec 11 10:35:20 crc kubenswrapper[5016]: W1211 10:35:20.172255 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff262272_63f6_4657_bdee_b10c2e65e478.slice/crio-ac3368acf94f98d7f5837896767877eab49a46539223bd64600148927508537f WatchSource:0}: Error finding container ac3368acf94f98d7f5837896767877eab49a46539223bd64600148927508537f: Status 404 returned error can't find the container with id ac3368acf94f98d7f5837896767877eab49a46539223bd64600148927508537f Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.255631 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.255667 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.255679 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.255695 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.255705 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:20Z","lastTransitionTime":"2025-12-11T10:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.358051 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.358305 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.358313 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.358330 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.358347 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:20Z","lastTransitionTime":"2025-12-11T10:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.365123 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" event={"ID":"ff262272-63f6-4657-bdee-b10c2e65e478","Type":"ContainerStarted","Data":"ac3368acf94f98d7f5837896767877eab49a46539223bd64600148927508537f"} Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.460438 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.460468 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.460477 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.460490 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.460500 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:20Z","lastTransitionTime":"2025-12-11T10:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.461894 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs\") pod \"network-metrics-daemon-v2qvr\" (UID: \"d29d8609-2309-45b2-abc7-b4e10ae27eeb\") " pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:20 crc kubenswrapper[5016]: E1211 10:35:20.462075 5016 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 10:35:20 crc kubenswrapper[5016]: E1211 10:35:20.462130 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs podName:d29d8609-2309-45b2-abc7-b4e10ae27eeb nodeName:}" failed. No retries permitted until 2025-12-11 10:35:21.462116216 +0000 UTC m=+38.280675795 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs") pod "network-metrics-daemon-v2qvr" (UID: "d29d8609-2309-45b2-abc7-b4e10ae27eeb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.473397 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.473469 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:20 crc kubenswrapper[5016]: E1211 10:35:20.473527 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:20 crc kubenswrapper[5016]: E1211 10:35:20.473633 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.562528 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.562560 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.562572 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.562589 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.562600 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:20Z","lastTransitionTime":"2025-12-11T10:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.665076 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.665127 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.665136 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.665152 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.665163 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:20Z","lastTransitionTime":"2025-12-11T10:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.767920 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.768010 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.768026 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.768049 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.768062 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:20Z","lastTransitionTime":"2025-12-11T10:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.870811 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.870839 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.870850 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.870865 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.870877 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:20Z","lastTransitionTime":"2025-12-11T10:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.974264 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.974355 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.974386 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.974418 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:20 crc kubenswrapper[5016]: I1211 10:35:20.974449 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:20Z","lastTransitionTime":"2025-12-11T10:35:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.067183 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.067437 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.067463 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:37.067425828 +0000 UTC m=+53.885985457 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.067519 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.067574 5016 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.067650 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:37.067629522 +0000 UTC m=+53.886189141 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.067721 5016 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.067783 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:37.067766346 +0000 UTC m=+53.886325945 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.077032 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.077072 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.077080 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.077096 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.077106 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:21Z","lastTransitionTime":"2025-12-11T10:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.168682 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.168744 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.168877 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.168881 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.168915 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.168896 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.168977 5016 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.168977 5016 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.169028 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:37.169012056 +0000 UTC m=+53.987571645 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.169042 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:37.169035516 +0000 UTC m=+53.987595095 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.180481 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.180527 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.180539 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.180554 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.180566 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:21Z","lastTransitionTime":"2025-12-11T10:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.283255 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.283283 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.283292 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.283306 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.283315 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:21Z","lastTransitionTime":"2025-12-11T10:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.386451 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.386503 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.386519 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.386541 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.386559 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:21Z","lastTransitionTime":"2025-12-11T10:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.473137 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs\") pod \"network-metrics-daemon-v2qvr\" (UID: \"d29d8609-2309-45b2-abc7-b4e10ae27eeb\") " pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.473300 5016 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.473357 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs podName:d29d8609-2309-45b2-abc7-b4e10ae27eeb nodeName:}" failed. No retries permitted until 2025-12-11 10:35:23.473341232 +0000 UTC m=+40.291900811 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs") pod "network-metrics-daemon-v2qvr" (UID: "d29d8609-2309-45b2-abc7-b4e10ae27eeb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.474578 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.474621 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.474723 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-v2qvr" podUID="d29d8609-2309-45b2-abc7-b4e10ae27eeb" Dec 11 10:35:21 crc kubenswrapper[5016]: E1211 10:35:21.474823 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.489781 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.489850 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.489866 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.489892 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.489907 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:21Z","lastTransitionTime":"2025-12-11T10:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.592331 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.592366 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.592377 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.592391 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.592401 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:21Z","lastTransitionTime":"2025-12-11T10:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.694756 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.694795 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.694805 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.694819 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.694829 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:21Z","lastTransitionTime":"2025-12-11T10:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.797426 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.797452 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.797460 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.797472 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.797481 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:21Z","lastTransitionTime":"2025-12-11T10:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.899218 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.899291 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.899350 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.899376 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:21 crc kubenswrapper[5016]: I1211 10:35:21.899396 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:21Z","lastTransitionTime":"2025-12-11T10:35:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.001281 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.001326 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.001338 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.001355 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.001367 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:22Z","lastTransitionTime":"2025-12-11T10:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.103222 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.103289 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.103299 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.103322 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.103340 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:22Z","lastTransitionTime":"2025-12-11T10:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.205451 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.205498 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.205508 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.205522 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.205533 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:22Z","lastTransitionTime":"2025-12-11T10:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.307266 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.307304 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.307314 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.307329 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.307341 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:22Z","lastTransitionTime":"2025-12-11T10:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.377298 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" event={"ID":"a73d5878-697e-4e13-924c-248fb9150c9e","Type":"ContainerStarted","Data":"d83846c80b0be07912821685be4604120f3c711201537ae7da3bf08992734da3"} Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.412605 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.412651 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.412664 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.412678 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.412688 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:22Z","lastTransitionTime":"2025-12-11T10:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.473830 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.473857 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:22 crc kubenswrapper[5016]: E1211 10:35:22.474007 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:22 crc kubenswrapper[5016]: E1211 10:35:22.474105 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.514696 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.514734 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.514745 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.514791 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.514803 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:22Z","lastTransitionTime":"2025-12-11T10:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.622379 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.622412 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.622423 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.622437 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.622448 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:22Z","lastTransitionTime":"2025-12-11T10:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.724551 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.724812 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.724827 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.724842 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.724855 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:22Z","lastTransitionTime":"2025-12-11T10:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.827548 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.827597 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.827607 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.827622 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.827630 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:22Z","lastTransitionTime":"2025-12-11T10:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.929504 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.929562 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.929584 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.929612 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:22 crc kubenswrapper[5016]: I1211 10:35:22.929633 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:22Z","lastTransitionTime":"2025-12-11T10:35:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.032124 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.032187 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.032202 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.032219 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.032234 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:23Z","lastTransitionTime":"2025-12-11T10:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.135697 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.135771 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.135782 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.135799 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.135811 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:23Z","lastTransitionTime":"2025-12-11T10:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.239806 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.239842 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.239850 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.239863 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.239873 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:23Z","lastTransitionTime":"2025-12-11T10:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.343153 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.343194 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.343206 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.343222 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.343233 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:23Z","lastTransitionTime":"2025-12-11T10:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.389068 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" event={"ID":"ff262272-63f6-4657-bdee-b10c2e65e478","Type":"ContainerStarted","Data":"9056dbd5796560c983c9899c4efb0be764f5e3a187ab31e3548dc46b36d95301"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.389125 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" event={"ID":"ff262272-63f6-4657-bdee-b10c2e65e478","Type":"ContainerStarted","Data":"81f1af792f1009c3db356f6db4f0ef64b783922c3be2716d605fe563cced5079"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.391325 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerStarted","Data":"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.391361 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerStarted","Data":"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.445288 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.445329 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.445346 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.445362 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.445372 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:23Z","lastTransitionTime":"2025-12-11T10:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.451984 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2wrfz" podStartSLOduration=13.451960745 podStartE2EDuration="13.451960745s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:23.423424526 +0000 UTC m=+40.241984105" watchObservedRunningTime="2025-12-11 10:35:23.451960745 +0000 UTC m=+40.270520324" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.474282 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.474395 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:23 crc kubenswrapper[5016]: E1211 10:35:23.475355 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-v2qvr" podUID="d29d8609-2309-45b2-abc7-b4e10ae27eeb" Dec 11 10:35:23 crc kubenswrapper[5016]: E1211 10:35:23.475522 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.496838 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs\") pod \"network-metrics-daemon-v2qvr\" (UID: \"d29d8609-2309-45b2-abc7-b4e10ae27eeb\") " pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:23 crc kubenswrapper[5016]: E1211 10:35:23.497052 5016 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 10:35:23 crc kubenswrapper[5016]: E1211 10:35:23.497135 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs podName:d29d8609-2309-45b2-abc7-b4e10ae27eeb nodeName:}" failed. No retries permitted until 2025-12-11 10:35:27.497114613 +0000 UTC m=+44.315674192 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs") pod "network-metrics-daemon-v2qvr" (UID: "d29d8609-2309-45b2-abc7-b4e10ae27eeb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.548041 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.548074 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.548083 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.548097 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.548106 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:23Z","lastTransitionTime":"2025-12-11T10:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.650786 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.650822 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.650831 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.650844 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.650852 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:23Z","lastTransitionTime":"2025-12-11T10:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.753349 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.753415 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.753424 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.753439 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.753449 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:23Z","lastTransitionTime":"2025-12-11T10:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.855576 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.855621 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.855632 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.855649 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.855660 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:23Z","lastTransitionTime":"2025-12-11T10:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.959296 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.959343 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.959355 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.959372 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:23 crc kubenswrapper[5016]: I1211 10:35:23.959382 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:23Z","lastTransitionTime":"2025-12-11T10:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.062100 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.062157 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.062168 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.062189 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.062201 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:24Z","lastTransitionTime":"2025-12-11T10:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.164571 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.164605 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.164615 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.164632 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.164642 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:24Z","lastTransitionTime":"2025-12-11T10:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.266566 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.266602 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.266614 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.266631 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.266643 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:24Z","lastTransitionTime":"2025-12-11T10:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.368573 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.368597 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.368607 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.368620 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.368630 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:24Z","lastTransitionTime":"2025-12-11T10:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.396780 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerStarted","Data":"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b"} Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.398431 5016 generic.go:334] "Generic (PLEG): container finished" podID="a73d5878-697e-4e13-924c-248fb9150c9e" containerID="d83846c80b0be07912821685be4604120f3c711201537ae7da3bf08992734da3" exitCode=0 Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.398493 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" event={"ID":"a73d5878-697e-4e13-924c-248fb9150c9e","Type":"ContainerDied","Data":"d83846c80b0be07912821685be4604120f3c711201537ae7da3bf08992734da3"} Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.471140 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.471168 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.471178 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.471194 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.471205 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:24Z","lastTransitionTime":"2025-12-11T10:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.474041 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:24 crc kubenswrapper[5016]: E1211 10:35:24.474129 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.474412 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:24 crc kubenswrapper[5016]: E1211 10:35:24.474457 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.577170 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.577212 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.577223 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.577240 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.577251 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:24Z","lastTransitionTime":"2025-12-11T10:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.622653 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.622685 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.622696 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.622713 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.622724 5016 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-11T10:35:24Z","lastTransitionTime":"2025-12-11T10:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.680126 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f"] Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.681811 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.685555 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.687157 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.687307 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.687432 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.718998 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/5640ce19-9825-4fa6-80d3-0e97cb7980b2-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.719040 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/5640ce19-9825-4fa6-80d3-0e97cb7980b2-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.719092 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/5640ce19-9825-4fa6-80d3-0e97cb7980b2-service-ca\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.719354 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5640ce19-9825-4fa6-80d3-0e97cb7980b2-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.719382 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5640ce19-9825-4fa6-80d3-0e97cb7980b2-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.820238 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/5640ce19-9825-4fa6-80d3-0e97cb7980b2-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.820306 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/5640ce19-9825-4fa6-80d3-0e97cb7980b2-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.820370 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5640ce19-9825-4fa6-80d3-0e97cb7980b2-service-ca\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.820410 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5640ce19-9825-4fa6-80d3-0e97cb7980b2-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.820424 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/5640ce19-9825-4fa6-80d3-0e97cb7980b2-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.820458 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: 
\"kubernetes.io/host-path/5640ce19-9825-4fa6-80d3-0e97cb7980b2-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.820443 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5640ce19-9825-4fa6-80d3-0e97cb7980b2-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.822026 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5640ce19-9825-4fa6-80d3-0e97cb7980b2-service-ca\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.829666 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5640ce19-9825-4fa6-80d3-0e97cb7980b2-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.841197 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5640ce19-9825-4fa6-80d3-0e97cb7980b2-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-vjg9f\" (UID: \"5640ce19-9825-4fa6-80d3-0e97cb7980b2\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:24 crc kubenswrapper[5016]: I1211 10:35:24.995582 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" Dec 11 10:35:25 crc kubenswrapper[5016]: I1211 10:35:25.405826 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerStarted","Data":"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2"} Dec 11 10:35:25 crc kubenswrapper[5016]: I1211 10:35:25.405869 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerStarted","Data":"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e"} Dec 11 10:35:25 crc kubenswrapper[5016]: I1211 10:35:25.405880 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerStarted","Data":"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3"} Dec 11 10:35:25 crc kubenswrapper[5016]: I1211 10:35:25.407057 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" event={"ID":"5640ce19-9825-4fa6-80d3-0e97cb7980b2","Type":"ContainerStarted","Data":"8bbace6fe303a0d38f20cf5a9f5a61212c983047081d5367f802f10bad550f44"} Dec 11 10:35:25 crc kubenswrapper[5016]: I1211 10:35:25.407166 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" event={"ID":"5640ce19-9825-4fa6-80d3-0e97cb7980b2","Type":"ContainerStarted","Data":"acb27551e60b25a288402716fa2a504285c42f89af705fe38fe27de8b1554f2a"} Dec 11 10:35:25 crc kubenswrapper[5016]: I1211 10:35:25.409205 5016 generic.go:334] "Generic (PLEG): container finished" podID="a73d5878-697e-4e13-924c-248fb9150c9e" containerID="023244a5748fbce22888fbb2cb09316258a09b0792a70e2062f271d7cdcccad4" exitCode=0 Dec 11 10:35:25 crc kubenswrapper[5016]: I1211 10:35:25.409238 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" event={"ID":"a73d5878-697e-4e13-924c-248fb9150c9e","Type":"ContainerDied","Data":"023244a5748fbce22888fbb2cb09316258a09b0792a70e2062f271d7cdcccad4"} Dec 11 10:35:25 crc kubenswrapper[5016]: I1211 10:35:25.423241 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-vjg9f" podStartSLOduration=15.423222551 podStartE2EDuration="15.423222551s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:25.422103384 +0000 UTC m=+42.240662983" watchObservedRunningTime="2025-12-11 10:35:25.423222551 +0000 UTC m=+42.241782130" Dec 11 10:35:25 crc kubenswrapper[5016]: I1211 10:35:25.474025 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:25 crc kubenswrapper[5016]: E1211 10:35:25.474148 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-v2qvr" podUID="d29d8609-2309-45b2-abc7-b4e10ae27eeb" Dec 11 10:35:25 crc kubenswrapper[5016]: I1211 10:35:25.474025 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:25 crc kubenswrapper[5016]: E1211 10:35:25.474253 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:26 crc kubenswrapper[5016]: I1211 10:35:26.418068 5016 generic.go:334] "Generic (PLEG): container finished" podID="a73d5878-697e-4e13-924c-248fb9150c9e" containerID="21c676d93349f3832c8d46ccbbfb8e6d155cc097c073d8798697e8236f413901" exitCode=0 Dec 11 10:35:26 crc kubenswrapper[5016]: I1211 10:35:26.418119 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" event={"ID":"a73d5878-697e-4e13-924c-248fb9150c9e","Type":"ContainerDied","Data":"21c676d93349f3832c8d46ccbbfb8e6d155cc097c073d8798697e8236f413901"} Dec 11 10:35:26 crc kubenswrapper[5016]: I1211 10:35:26.473805 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:26 crc kubenswrapper[5016]: I1211 10:35:26.473857 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:26 crc kubenswrapper[5016]: E1211 10:35:26.473980 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:26 crc kubenswrapper[5016]: E1211 10:35:26.474070 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:26 crc kubenswrapper[5016]: I1211 10:35:26.849521 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:35:27 crc kubenswrapper[5016]: I1211 10:35:27.432910 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" event={"ID":"a73d5878-697e-4e13-924c-248fb9150c9e","Type":"ContainerStarted","Data":"d5d00107e4e41bf3952eb47c8255bb5fe1f4b80e2cacf2b0d23210fcc59a020b"} Dec 11 10:35:27 crc kubenswrapper[5016]: I1211 10:35:27.473590 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:27 crc kubenswrapper[5016]: E1211 10:35:27.473770 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-v2qvr" podUID="d29d8609-2309-45b2-abc7-b4e10ae27eeb" Dec 11 10:35:27 crc kubenswrapper[5016]: I1211 10:35:27.474409 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:27 crc kubenswrapper[5016]: E1211 10:35:27.474522 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:27 crc kubenswrapper[5016]: I1211 10:35:27.559196 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs\") pod \"network-metrics-daemon-v2qvr\" (UID: \"d29d8609-2309-45b2-abc7-b4e10ae27eeb\") " pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:27 crc kubenswrapper[5016]: E1211 10:35:27.559385 5016 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 10:35:27 crc kubenswrapper[5016]: E1211 10:35:27.559505 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs podName:d29d8609-2309-45b2-abc7-b4e10ae27eeb nodeName:}" failed. No retries permitted until 2025-12-11 10:35:35.559479603 +0000 UTC m=+52.378039242 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs") pod "network-metrics-daemon-v2qvr" (UID: "d29d8609-2309-45b2-abc7-b4e10ae27eeb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 10:35:28 crc kubenswrapper[5016]: I1211 10:35:28.442928 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerStarted","Data":"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"} Dec 11 10:35:28 crc kubenswrapper[5016]: I1211 10:35:28.474054 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:28 crc kubenswrapper[5016]: I1211 10:35:28.474054 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:28 crc kubenswrapper[5016]: E1211 10:35:28.474275 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:28 crc kubenswrapper[5016]: E1211 10:35:28.474364 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:29 crc kubenswrapper[5016]: I1211 10:35:29.474174 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:29 crc kubenswrapper[5016]: I1211 10:35:29.474305 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:29 crc kubenswrapper[5016]: E1211 10:35:29.474407 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-v2qvr" podUID="d29d8609-2309-45b2-abc7-b4e10ae27eeb" Dec 11 10:35:29 crc kubenswrapper[5016]: E1211 10:35:29.474453 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:30 crc kubenswrapper[5016]: I1211 10:35:30.474489 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:30 crc kubenswrapper[5016]: I1211 10:35:30.474520 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:30 crc kubenswrapper[5016]: E1211 10:35:30.474613 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:30 crc kubenswrapper[5016]: E1211 10:35:30.474750 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:31 crc kubenswrapper[5016]: I1211 10:35:31.474386 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:31 crc kubenswrapper[5016]: E1211 10:35:31.474615 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-v2qvr" podUID="d29d8609-2309-45b2-abc7-b4e10ae27eeb" Dec 11 10:35:31 crc kubenswrapper[5016]: I1211 10:35:31.474740 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:31 crc kubenswrapper[5016]: E1211 10:35:31.475089 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:32 crc kubenswrapper[5016]: I1211 10:35:32.474127 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:32 crc kubenswrapper[5016]: I1211 10:35:32.474270 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:32 crc kubenswrapper[5016]: E1211 10:35:32.474290 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:32 crc kubenswrapper[5016]: E1211 10:35:32.474565 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:33 crc kubenswrapper[5016]: I1211 10:35:33.464141 5016 generic.go:334] "Generic (PLEG): container finished" podID="a73d5878-697e-4e13-924c-248fb9150c9e" containerID="d5d00107e4e41bf3952eb47c8255bb5fe1f4b80e2cacf2b0d23210fcc59a020b" exitCode=0 Dec 11 10:35:33 crc kubenswrapper[5016]: I1211 10:35:33.464258 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" event={"ID":"a73d5878-697e-4e13-924c-248fb9150c9e","Type":"ContainerDied","Data":"d5d00107e4e41bf3952eb47c8255bb5fe1f4b80e2cacf2b0d23210fcc59a020b"} Dec 11 10:35:33 crc kubenswrapper[5016]: I1211 10:35:33.472460 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerStarted","Data":"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b"} Dec 11 10:35:33 crc kubenswrapper[5016]: I1211 10:35:33.473747 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:33 crc kubenswrapper[5016]: I1211 10:35:33.473773 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:33 crc kubenswrapper[5016]: E1211 10:35:33.474011 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-v2qvr" podUID="d29d8609-2309-45b2-abc7-b4e10ae27eeb" Dec 11 10:35:33 crc kubenswrapper[5016]: E1211 10:35:33.474137 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:34 crc kubenswrapper[5016]: I1211 10:35:34.473519 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:34 crc kubenswrapper[5016]: I1211 10:35:34.473567 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:34 crc kubenswrapper[5016]: E1211 10:35:34.473963 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:34 crc kubenswrapper[5016]: E1211 10:35:34.474028 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:34 crc kubenswrapper[5016]: I1211 10:35:34.480636 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" event={"ID":"a73d5878-697e-4e13-924c-248fb9150c9e","Type":"ContainerStarted","Data":"a33a5dda8db1f9f7b94fc61290ce786b1fc5d5efdc38c5b1efae678a222ff551"} Dec 11 10:35:34 crc kubenswrapper[5016]: I1211 10:35:34.480985 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:34 crc kubenswrapper[5016]: I1211 10:35:34.481024 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:34 crc kubenswrapper[5016]: I1211 10:35:34.504606 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" podStartSLOduration=24.504590588 podStartE2EDuration="24.504590588s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:34.50426928 +0000 UTC m=+51.322828879" watchObservedRunningTime="2025-12-11 10:35:34.504590588 +0000 UTC m=+51.323150167" Dec 11 10:35:34 crc kubenswrapper[5016]: I1211 10:35:34.994818 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:35 crc kubenswrapper[5016]: I1211 10:35:35.473547 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:35 crc kubenswrapper[5016]: I1211 10:35:35.473564 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:35 crc kubenswrapper[5016]: E1211 10:35:35.473674 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-v2qvr" podUID="d29d8609-2309-45b2-abc7-b4e10ae27eeb" Dec 11 10:35:35 crc kubenswrapper[5016]: E1211 10:35:35.473849 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:35 crc kubenswrapper[5016]: I1211 10:35:35.486230 5016 generic.go:334] "Generic (PLEG): container finished" podID="a73d5878-697e-4e13-924c-248fb9150c9e" containerID="a33a5dda8db1f9f7b94fc61290ce786b1fc5d5efdc38c5b1efae678a222ff551" exitCode=0 Dec 11 10:35:35 crc kubenswrapper[5016]: I1211 10:35:35.486289 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" event={"ID":"a73d5878-697e-4e13-924c-248fb9150c9e","Type":"ContainerDied","Data":"a33a5dda8db1f9f7b94fc61290ce786b1fc5d5efdc38c5b1efae678a222ff551"} Dec 11 10:35:35 crc kubenswrapper[5016]: I1211 10:35:35.486871 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:35 crc kubenswrapper[5016]: I1211 10:35:35.517727 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:35 crc kubenswrapper[5016]: I1211 10:35:35.649429 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs\") pod \"network-metrics-daemon-v2qvr\" (UID: \"d29d8609-2309-45b2-abc7-b4e10ae27eeb\") " pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:35 crc kubenswrapper[5016]: E1211 10:35:35.649567 5016 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 10:35:35 crc kubenswrapper[5016]: E1211 10:35:35.649649 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs podName:d29d8609-2309-45b2-abc7-b4e10ae27eeb nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.649627295 +0000 UTC m=+68.468186944 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs") pod "network-metrics-daemon-v2qvr" (UID: "d29d8609-2309-45b2-abc7-b4e10ae27eeb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 11 10:35:36 crc kubenswrapper[5016]: I1211 10:35:36.473676 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:36 crc kubenswrapper[5016]: E1211 10:35:36.473814 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:36 crc kubenswrapper[5016]: I1211 10:35:36.473692 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:36 crc kubenswrapper[5016]: E1211 10:35:36.474080 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:37 crc kubenswrapper[5016]: I1211 10:35:37.165387 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:37 crc kubenswrapper[5016]: I1211 10:35:37.165528 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:37 crc kubenswrapper[5016]: I1211 10:35:37.165561 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.165619 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:36:09.165588468 +0000 UTC m=+85.984148047 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.165659 5016 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.165711 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:36:09.165698251 +0000 UTC m=+85.984257830 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.165762 5016 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.165838 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-11 10:36:09.165818524 +0000 UTC m=+85.984378093 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 11 10:35:37 crc kubenswrapper[5016]: I1211 10:35:37.266864 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:37 crc kubenswrapper[5016]: I1211 10:35:37.266912 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.267046 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.267061 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.267079 5016 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.267084 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.267122 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-12-11 10:36:09.267108629 +0000 UTC m=+86.085668208 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.267123 5016 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.267141 5016 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.267196 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-11 10:36:09.267177531 +0000 UTC m=+86.085737150 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 11 10:35:37 crc kubenswrapper[5016]: I1211 10:35:37.351840 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 11 10:35:37 crc kubenswrapper[5016]: I1211 10:35:37.364704 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Dec 11 10:35:37 crc kubenswrapper[5016]: I1211 10:35:37.473837 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:37 crc kubenswrapper[5016]: I1211 10:35:37.473901 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.474076 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-v2qvr" podUID="d29d8609-2309-45b2-abc7-b4e10ae27eeb" Dec 11 10:35:37 crc kubenswrapper[5016]: E1211 10:35:37.474194 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:38 crc kubenswrapper[5016]: I1211 10:35:38.473654 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:38 crc kubenswrapper[5016]: I1211 10:35:38.473652 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:38 crc kubenswrapper[5016]: E1211 10:35:38.473772 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:38 crc kubenswrapper[5016]: E1211 10:35:38.473844 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:38 crc kubenswrapper[5016]: I1211 10:35:38.498022 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-54q52" event={"ID":"a73d5878-697e-4e13-924c-248fb9150c9e","Type":"ContainerStarted","Data":"63754f5e7134041c9dbb08d392e99f89b5a0956de47dd7abfd19bf7739dfdbab"} Dec 11 10:35:38 crc kubenswrapper[5016]: I1211 10:35:38.533912 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-54q52" podStartSLOduration=29.533893586 podStartE2EDuration="29.533893586s" podCreationTimestamp="2025-12-11 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:38.533400633 +0000 UTC m=+55.351960232" watchObservedRunningTime="2025-12-11 10:35:38.533893586 +0000 UTC m=+55.352453175" Dec 11 10:35:38 crc kubenswrapper[5016]: I1211 10:35:38.534752 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=1.534745666 podStartE2EDuration="1.534745666s" podCreationTimestamp="2025-12-11 10:35:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:38.51297321 +0000 UTC m=+55.331532799" watchObservedRunningTime="2025-12-11 10:35:38.534745666 +0000 UTC m=+55.353305245" Dec 11 10:35:39 crc kubenswrapper[5016]: I1211 10:35:39.314808 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-v2qvr"] Dec 11 10:35:39 crc kubenswrapper[5016]: I1211 10:35:39.315033 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:39 crc kubenswrapper[5016]: E1211 10:35:39.315153 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-v2qvr" podUID="d29d8609-2309-45b2-abc7-b4e10ae27eeb" Dec 11 10:35:39 crc kubenswrapper[5016]: I1211 10:35:39.473981 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:39 crc kubenswrapper[5016]: E1211 10:35:39.474118 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:40 crc kubenswrapper[5016]: I1211 10:35:40.474024 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 11 10:35:40 crc kubenswrapper[5016]: I1211 10:35:40.474173 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 11 10:35:40 crc kubenswrapper[5016]: E1211 10:35:40.474253 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 11 10:35:40 crc kubenswrapper[5016]: E1211 10:35:40.474430 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 11 10:35:40 crc kubenswrapper[5016]: I1211 10:35:40.475018 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:40 crc kubenswrapper[5016]: E1211 10:35:40.475186 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-v2qvr" podUID="d29d8609-2309-45b2-abc7-b4e10ae27eeb" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.473881 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 11 10:35:41 crc kubenswrapper[5016]: E1211 10:35:41.474030 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.664150 5016 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.664346 5016 kubelet_node_status.go:538] "Fast updating node status as it just became ready" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.703870 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.704259 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.708459 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.708689 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.709191 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.709268 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.709340 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.709450 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.716867 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-tn6f4"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.717303 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.717961 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.718403 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.718754 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hbw4j"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.719217 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.720545 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xdpcj"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.720871 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.721858 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4k8l5"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.722239 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.722562 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.722981 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.724173 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.724480 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.724670 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.724737 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.724764 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.724770 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.724739 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.724868 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.724870 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.724964 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.724971 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.725079 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.725080 5016 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.725100 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.725130 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.725133 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.725168 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.725357 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.725548 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.725643 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.725727 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.725815 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.725960 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.726047 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.726204 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.726332 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.726440 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.726758 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.726793 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.727070 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.727167 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.727268 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.727273 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.727342 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.727418 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.727524 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.727658 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-b485d"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.727692 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.728142 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.729165 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.729589 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.731024 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-7rm8d"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.731437 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.732989 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-4cp4w"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.733508 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.737816 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.744123 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.759822 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.761537 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.762258 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.762915 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.763266 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.763530 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.764146 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.765511 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.765640 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.767136 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.767445 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-jpxgn"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.767927 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.768424 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.768693 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.768796 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-qldpr"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.769365 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.769386 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.770630 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.774301 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.775446 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-zp2gq"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.775577 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.775713 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.775819 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.775903 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.775969 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.777533 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.777554 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.777812 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-zp2gq" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.778059 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-rtj4v"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.778213 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.778352 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.778440 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.778470 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.778592 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.778685 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.778898 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.778693 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779202 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779229 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779295 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.778795 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779371 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.778888 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779425 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.778908 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779517 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779012 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779056 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779091 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779687 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779131 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779400 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779790 5016 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779842 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779917 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779976 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.780027 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.780150 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.780236 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.780341 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.780357 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779606 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779926 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.780716 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.779654 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.780029 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.780968 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.804369 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.806972 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-8f46b"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.857774 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.858179 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.858879 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.858930 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859341 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/43e0986a-2fa8-4410-9a6d-1499f5840491-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-zcxnk\" (UID: \"43e0986a-2fa8-4410-9a6d-1499f5840491\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859376 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/73e450c1-7bc9-4502-b3c5-e7845ba29342-machine-approver-tls\") pod \"machine-approver-56656f9798-7tv87\" (UID: \"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859401 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/974b6a63-5953-4683-8909-20b4a93856b1-encryption-config\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859423 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d20858ea-54b5-474f-bdd9-40eb83d42e57-trusted-ca\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859439 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-audit-policies\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.858933 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859459 5016 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pq95k\" (UniqueName: \"kubernetes.io/projected/cb94a68f-794d-4e0f-9a65-aff1b885d021-kube-api-access-pq95k\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: \"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859522 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c12d008c-11de-489c-9553-175a76cbfef8-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-tb4rm\" (UID: \"c12d008c-11de-489c-9553-175a76cbfef8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859541 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/974b6a63-5953-4683-8909-20b4a93856b1-audit-policies\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859568 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb94a68f-794d-4e0f-9a65-aff1b885d021-config\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: \"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859590 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/974b6a63-5953-4683-8909-20b4a93856b1-serving-cert\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859613 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqr5k\" (UniqueName: \"kubernetes.io/projected/771549fe-a108-4fe9-a461-043432468961-kube-api-access-dqr5k\") pod \"route-controller-manager-6576b87f9c-w2qk9\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859638 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/43e0986a-2fa8-4410-9a6d-1499f5840491-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-zcxnk\" (UID: \"43e0986a-2fa8-4410-9a6d-1499f5840491\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859676 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pdh8\" (UniqueName: \"kubernetes.io/projected/73e450c1-7bc9-4502-b3c5-e7845ba29342-kube-api-access-8pdh8\") pod \"machine-approver-56656f9798-7tv87\" (UID: \"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859702 5016 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/771549fe-a108-4fe9-a461-043432468961-serving-cert\") pod \"route-controller-manager-6576b87f9c-w2qk9\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859723 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/974b6a63-5953-4683-8909-20b4a93856b1-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859743 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8rnb2\" (UID: \"5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859768 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-client-ca\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859790 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859813 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnpvq\" (UniqueName: \"kubernetes.io/projected/86f2da10-45a8-4cc4-9100-3e909d78274f-kube-api-access-gnpvq\") pod \"cluster-samples-operator-665b6dd947-58t4g\" (UID: \"86f2da10-45a8-4cc4-9100-3e909d78274f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859835 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sws26\" (UniqueName: \"kubernetes.io/projected/974b6a63-5953-4683-8909-20b4a93856b1-kube-api-access-sws26\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859881 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a91554fe-759f-4f9a-9d88-7b4d8650a08b-audit-dir\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859902 
5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859926 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/cb94a68f-794d-4e0f-9a65-aff1b885d021-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: \"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.859966 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkp4t\" (UniqueName: \"kubernetes.io/projected/d20858ea-54b5-474f-bdd9-40eb83d42e57-kube-api-access-xkp4t\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860024 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzb6q\" (UniqueName: \"kubernetes.io/projected/a91554fe-759f-4f9a-9d88-7b4d8650a08b-kube-api-access-tzb6q\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860049 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/771549fe-a108-4fe9-a461-043432468961-client-ca\") pod \"route-controller-manager-6576b87f9c-w2qk9\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860072 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/974b6a63-5953-4683-8909-20b4a93856b1-audit-dir\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860106 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/98219d38-61a5-425b-8281-3d0b72e10c77-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860130 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/974b6a63-5953-4683-8909-20b4a93856b1-etcd-client\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860151 5016 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860174 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860197 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnkxq\" (UniqueName: \"kubernetes.io/projected/1cc10407-5264-4bb5-8223-3ea9a4551c29-kube-api-access-qnkxq\") pod \"migrator-59844c95c7-c5sb2\" (UID: \"1cc10407-5264-4bb5-8223-3ea9a4551c29\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860219 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98219d38-61a5-425b-8281-3d0b72e10c77-config\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860240 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d20858ea-54b5-474f-bdd9-40eb83d42e57-serving-cert\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860262 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860292 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5jxm\" (UniqueName: \"kubernetes.io/projected/43e0986a-2fa8-4410-9a6d-1499f5840491-kube-api-access-c5jxm\") pod \"cluster-image-registry-operator-dc59b4c8b-zcxnk\" (UID: \"43e0986a-2fa8-4410-9a6d-1499f5840491\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860317 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/771549fe-a108-4fe9-a461-043432468961-config\") pod \"route-controller-manager-6576b87f9c-w2qk9\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860346 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b-config\") pod \"kube-controller-manager-operator-78b949d7b-8rnb2\" (UID: \"5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860370 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/86f2da10-45a8-4cc4-9100-3e909d78274f-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-58t4g\" (UID: \"86f2da10-45a8-4cc4-9100-3e909d78274f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860393 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860414 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860438 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/73e450c1-7bc9-4502-b3c5-e7845ba29342-auth-proxy-config\") pod \"machine-approver-56656f9798-7tv87\" (UID: \"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860460 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/cb94a68f-794d-4e0f-9a65-aff1b885d021-images\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: \"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860484 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860506 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860528 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8rnb2\" (UID: \"5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860549 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c12d008c-11de-489c-9553-175a76cbfef8-config\") pod \"openshift-apiserver-operator-796bbdcf4f-tb4rm\" (UID: \"c12d008c-11de-489c-9553-175a76cbfef8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860567 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzmvt\" (UniqueName: \"kubernetes.io/projected/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-kube-api-access-qzmvt\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860586 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d20858ea-54b5-474f-bdd9-40eb83d42e57-config\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860608 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7r7qz\" (UniqueName: \"kubernetes.io/projected/c12d008c-11de-489c-9553-175a76cbfef8-kube-api-access-7r7qz\") pod \"openshift-apiserver-operator-796bbdcf4f-tb4rm\" (UID: \"c12d008c-11de-489c-9553-175a76cbfef8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860629 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/98219d38-61a5-425b-8281-3d0b72e10c77-service-ca-bundle\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860654 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/43e0986a-2fa8-4410-9a6d-1499f5840491-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-zcxnk\" (UID: \"43e0986a-2fa8-4410-9a6d-1499f5840491\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860677 5016 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73e450c1-7bc9-4502-b3c5-e7845ba29342-config\") pod \"machine-approver-56656f9798-7tv87\" (UID: \"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860693 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/974b6a63-5953-4683-8909-20b4a93856b1-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860054 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860737 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pj4jp\" (UniqueName: \"kubernetes.io/projected/98219d38-61a5-425b-8281-3d0b72e10c77-kube-api-access-pj4jp\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860872 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-config\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860898 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-serving-cert\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860972 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/98219d38-61a5-425b-8281-3d0b72e10c77-serving-cert\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.860995 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.861017 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: 
\"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.861039 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.862233 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.863178 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.863286 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.863805 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.864254 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.865980 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.867003 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.867122 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.867539 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.867625 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.867811 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.867923 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.868533 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.869891 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.870512 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.870684 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-p6ggc"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.876641 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-m62h8"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.877232 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.877650 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-m62h8" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.877558 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.877499 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.877612 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.878896 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.881136 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.883322 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.884434 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.886692 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-88s4j"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.888374 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.888595 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.892415 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.892589 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-88s4j" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.892907 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kp5bk"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.893428 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-cghb5"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.893596 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.893595 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.894060 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.895471 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.895712 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.896097 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.901931 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xdpcj"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.907023 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.910107 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-7rm8d"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.916900 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.932228 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.935799 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.937858 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.948769 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.948816 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-p8pxn"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.949662 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-p8pxn" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.952234 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hbw4j"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.956698 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-b485d"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.956842 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.958669 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.958785 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.960253 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961653 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pdh8\" (UniqueName: \"kubernetes.io/projected/73e450c1-7bc9-4502-b3c5-e7845ba29342-kube-api-access-8pdh8\") pod \"machine-approver-56656f9798-7tv87\" (UID: \"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961686 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/771549fe-a108-4fe9-a461-043432468961-serving-cert\") pod \"route-controller-manager-6576b87f9c-w2qk9\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961711 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/974b6a63-5953-4683-8909-20b4a93856b1-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961732 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8rnb2\" (UID: \"5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961755 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-client-ca\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961776 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" 
(UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961800 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnpvq\" (UniqueName: \"kubernetes.io/projected/86f2da10-45a8-4cc4-9100-3e909d78274f-kube-api-access-gnpvq\") pod \"cluster-samples-operator-665b6dd947-58t4g\" (UID: \"86f2da10-45a8-4cc4-9100-3e909d78274f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961819 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sws26\" (UniqueName: \"kubernetes.io/projected/974b6a63-5953-4683-8909-20b4a93856b1-kube-api-access-sws26\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961840 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a91554fe-759f-4f9a-9d88-7b4d8650a08b-audit-dir\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961859 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961880 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/cb94a68f-794d-4e0f-9a65-aff1b885d021-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: \"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961900 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkp4t\" (UniqueName: \"kubernetes.io/projected/d20858ea-54b5-474f-bdd9-40eb83d42e57-kube-api-access-xkp4t\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961922 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzb6q\" (UniqueName: \"kubernetes.io/projected/a91554fe-759f-4f9a-9d88-7b4d8650a08b-kube-api-access-tzb6q\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961959 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/771549fe-a108-4fe9-a461-043432468961-client-ca\") pod \"route-controller-manager-6576b87f9c-w2qk9\" 
(UID: \"771549fe-a108-4fe9-a461-043432468961\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.961980 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/974b6a63-5953-4683-8909-20b4a93856b1-audit-dir\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962011 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/98219d38-61a5-425b-8281-3d0b72e10c77-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962032 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/974b6a63-5953-4683-8909-20b4a93856b1-etcd-client\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962051 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962072 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962096 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnkxq\" (UniqueName: \"kubernetes.io/projected/1cc10407-5264-4bb5-8223-3ea9a4551c29-kube-api-access-qnkxq\") pod \"migrator-59844c95c7-c5sb2\" (UID: \"1cc10407-5264-4bb5-8223-3ea9a4551c29\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962117 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98219d38-61a5-425b-8281-3d0b72e10c77-config\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962137 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d20858ea-54b5-474f-bdd9-40eb83d42e57-serving-cert\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:41 crc 
kubenswrapper[5016]: I1211 10:35:41.962161 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962191 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5jxm\" (UniqueName: \"kubernetes.io/projected/43e0986a-2fa8-4410-9a6d-1499f5840491-kube-api-access-c5jxm\") pod \"cluster-image-registry-operator-dc59b4c8b-zcxnk\" (UID: \"43e0986a-2fa8-4410-9a6d-1499f5840491\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962217 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/771549fe-a108-4fe9-a461-043432468961-config\") pod \"route-controller-manager-6576b87f9c-w2qk9\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962249 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b-config\") pod \"kube-controller-manager-operator-78b949d7b-8rnb2\" (UID: \"5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962270 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/86f2da10-45a8-4cc4-9100-3e909d78274f-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-58t4g\" (UID: \"86f2da10-45a8-4cc4-9100-3e909d78274f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962290 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962313 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962333 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/73e450c1-7bc9-4502-b3c5-e7845ba29342-auth-proxy-config\") pod \"machine-approver-56656f9798-7tv87\" (UID: \"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 
10:35:41.962354 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/cb94a68f-794d-4e0f-9a65-aff1b885d021-images\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: \"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962377 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962497 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962522 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8rnb2\" (UID: \"5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962571 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c12d008c-11de-489c-9553-175a76cbfef8-config\") pod \"openshift-apiserver-operator-796bbdcf4f-tb4rm\" (UID: \"c12d008c-11de-489c-9553-175a76cbfef8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962592 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzmvt\" (UniqueName: \"kubernetes.io/projected/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-kube-api-access-qzmvt\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962611 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d20858ea-54b5-474f-bdd9-40eb83d42e57-config\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962630 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7r7qz\" (UniqueName: \"kubernetes.io/projected/c12d008c-11de-489c-9553-175a76cbfef8-kube-api-access-7r7qz\") pod \"openshift-apiserver-operator-796bbdcf4f-tb4rm\" (UID: \"c12d008c-11de-489c-9553-175a76cbfef8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962652 5016 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/98219d38-61a5-425b-8281-3d0b72e10c77-service-ca-bundle\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962672 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/43e0986a-2fa8-4410-9a6d-1499f5840491-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-zcxnk\" (UID: \"43e0986a-2fa8-4410-9a6d-1499f5840491\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962813 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73e450c1-7bc9-4502-b3c5-e7845ba29342-config\") pod \"machine-approver-56656f9798-7tv87\" (UID: \"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.962851 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/974b6a63-5953-4683-8909-20b4a93856b1-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963013 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pj4jp\" (UniqueName: \"kubernetes.io/projected/98219d38-61a5-425b-8281-3d0b72e10c77-kube-api-access-pj4jp\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963038 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-config\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963058 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-serving-cert\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963090 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/98219d38-61a5-425b-8281-3d0b72e10c77-serving-cert\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963110 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963130 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963150 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963172 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/43e0986a-2fa8-4410-9a6d-1499f5840491-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-zcxnk\" (UID: \"43e0986a-2fa8-4410-9a6d-1499f5840491\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963192 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/73e450c1-7bc9-4502-b3c5-e7845ba29342-machine-approver-tls\") pod \"machine-approver-56656f9798-7tv87\" (UID: \"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963212 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/974b6a63-5953-4683-8909-20b4a93856b1-encryption-config\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963233 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d20858ea-54b5-474f-bdd9-40eb83d42e57-trusted-ca\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963254 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-audit-policies\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963276 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pq95k\" (UniqueName: \"kubernetes.io/projected/cb94a68f-794d-4e0f-9a65-aff1b885d021-kube-api-access-pq95k\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: 
\"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963296 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c12d008c-11de-489c-9553-175a76cbfef8-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-tb4rm\" (UID: \"c12d008c-11de-489c-9553-175a76cbfef8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963317 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/974b6a63-5953-4683-8909-20b4a93856b1-audit-policies\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963337 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb94a68f-794d-4e0f-9a65-aff1b885d021-config\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: \"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963357 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/974b6a63-5953-4683-8909-20b4a93856b1-serving-cert\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963379 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqr5k\" (UniqueName: \"kubernetes.io/projected/771549fe-a108-4fe9-a461-043432468961-kube-api-access-dqr5k\") pod \"route-controller-manager-6576b87f9c-w2qk9\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.963405 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/43e0986a-2fa8-4410-9a6d-1499f5840491-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-zcxnk\" (UID: \"43e0986a-2fa8-4410-9a6d-1499f5840491\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.966817 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-m62h8"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.966866 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.966878 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-rtj4v"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.968856 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/73e450c1-7bc9-4502-b3c5-e7845ba29342-auth-proxy-config\") pod \"machine-approver-56656f9798-7tv87\" (UID: 
\"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.969572 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-qldpr"] Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.970755 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/98219d38-61a5-425b-8281-3d0b72e10c77-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.971361 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/974b6a63-5953-4683-8909-20b4a93856b1-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.972088 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.974805 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.974863 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/771549fe-a108-4fe9-a461-043432468961-serving-cert\") pod \"route-controller-manager-6576b87f9c-w2qk9\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.976058 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/771549fe-a108-4fe9-a461-043432468961-config\") pod \"route-controller-manager-6576b87f9c-w2qk9\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.976359 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.976675 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b-config\") pod \"kube-controller-manager-operator-78b949d7b-8rnb2\" (UID: \"5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.977159 5016 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-client-ca\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.981089 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.984909 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.985157 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a91554fe-759f-4f9a-9d88-7b4d8650a08b-audit-dir\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.985718 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98219d38-61a5-425b-8281-3d0b72e10c77-config\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.985852 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/974b6a63-5953-4683-8909-20b4a93856b1-audit-dir\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.986346 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/771549fe-a108-4fe9-a461-043432468961-client-ca\") pod \"route-controller-manager-6576b87f9c-w2qk9\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.993472 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d20858ea-54b5-474f-bdd9-40eb83d42e57-trusted-ca\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.995434 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/974b6a63-5953-4683-8909-20b4a93856b1-etcd-client\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.995787 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.996363 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/cb94a68f-794d-4e0f-9a65-aff1b885d021-images\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: \"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:41 crc kubenswrapper[5016]: I1211 10:35:41.997477 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.001266 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.002106 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/974b6a63-5953-4683-8909-20b4a93856b1-audit-policies\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.002116 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.002188 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-4cp4w"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.002484 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/98219d38-61a5-425b-8281-3d0b72e10c77-service-ca-bundle\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.002624 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-audit-policies\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.002968 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73e450c1-7bc9-4502-b3c5-e7845ba29342-config\") pod \"machine-approver-56656f9798-7tv87\" (UID: \"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:42 crc 
kubenswrapper[5016]: I1211 10:35:42.003409 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/974b6a63-5953-4683-8909-20b4a93856b1-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.004522 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-config\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.005451 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.009648 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.010432 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb94a68f-794d-4e0f-9a65-aff1b885d021-config\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: \"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.010476 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.010506 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-jpxgn"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.011439 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.014350 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c12d008c-11de-489c-9553-175a76cbfef8-config\") pod \"openshift-apiserver-operator-796bbdcf4f-tb4rm\" (UID: \"c12d008c-11de-489c-9553-175a76cbfef8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.016354 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d20858ea-54b5-474f-bdd9-40eb83d42e57-config\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.016699 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/43e0986a-2fa8-4410-9a6d-1499f5840491-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-zcxnk\" (UID: \"43e0986a-2fa8-4410-9a6d-1499f5840491\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.017629 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/43e0986a-2fa8-4410-9a6d-1499f5840491-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-zcxnk\" (UID: \"43e0986a-2fa8-4410-9a6d-1499f5840491\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.020120 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.020512 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8rnb2\" (UID: \"5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.020917 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.025876 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d20858ea-54b5-474f-bdd9-40eb83d42e57-serving-cert\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.026109 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/98219d38-61a5-425b-8281-3d0b72e10c77-serving-cert\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.027599 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.027859 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-serving-cert\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.028040 5016 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.028087 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/86f2da10-45a8-4cc4-9100-3e909d78274f-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-58t4g\" (UID: \"86f2da10-45a8-4cc4-9100-3e909d78274f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.028205 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/73e450c1-7bc9-4502-b3c5-e7845ba29342-machine-approver-tls\") pod \"machine-approver-56656f9798-7tv87\" (UID: \"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.028250 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.028703 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4k8l5"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.029026 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c12d008c-11de-489c-9553-175a76cbfef8-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-tb4rm\" (UID: \"c12d008c-11de-489c-9553-175a76cbfef8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.029165 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.029832 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.030230 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/cb94a68f-794d-4e0f-9a65-aff1b885d021-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: \"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.030489 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.032495 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.032520 5016 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.033616 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/974b6a63-5953-4683-8909-20b4a93856b1-serving-cert\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.033677 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-jbqpn"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.034492 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-zp2gq"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.034608 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.035745 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.038432 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/974b6a63-5953-4683-8909-20b4a93856b1-encryption-config\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.038510 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.038798 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-p6ggc"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.041298 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-88s4j"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.043990 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.044032 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-cghb5"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.044042 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.046561 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-tn6f4"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.047808 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.049033 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jbqpn"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.050729 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kp5bk"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 
10:35:42.051098 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.052123 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.053696 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-fz98p"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.054458 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-fz98p" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.055684 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-sg9hf"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.063887 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-fz98p"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.064153 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.068388 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.069334 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-sg9hf"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.070728 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-tfj94"] Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.071533 5016 util.go:30] "No sandbox for pod can be found. 
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.086232 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.106861 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.127652 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.146905 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.167716 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.187016 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.207504 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.227412 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.246697 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.267311 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.286505 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.307509 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.327321 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.347728 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.367663 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.386921 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.406889 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.426568 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.448025 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.467574 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.473835 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.474060 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.473964 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.528104 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.546812 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.566978 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569464 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-encryption-config\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569536 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-serving-cert\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569573 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzprh\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-kube-api-access-jzprh\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569618 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/84d62237-3910-4eeb-845d-2d9c3c5a8d97-installation-pull-secrets\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569641 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-registry-tls\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569664 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-node-pullsecrets\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569684 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-audit\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569715 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-bound-sa-token\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569746 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569780 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-audit-dir\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569819 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/84d62237-3910-4eeb-845d-2d9c3c5a8d97-registry-certificates\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569851 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569872 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-image-import-ca\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569922 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-config\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569965 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-etcd-client\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.569993 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/84d62237-3910-4eeb-845d-2d9c3c5a8d97-ca-trust-extracted\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.570007 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-etcd-serving-ca\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.570060 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/84d62237-3910-4eeb-845d-2d9c3c5a8d97-trusted-ca\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.570099 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsmb8\" (UniqueName: \"kubernetes.io/projected/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-kube-api-access-tsmb8\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: E1211 10:35:42.570460 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.070445273 +0000 UTC m=+59.889004852 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.587457 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.607491 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.626587 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.647527 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.667226 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.670883 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:42 crc kubenswrapper[5016]: E1211 10:35:42.671090 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.171059631 +0000 UTC m=+59.989619210 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.671180 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzprh\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-kube-api-access-jzprh\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.671241 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-registry-tls\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.671261 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/84d62237-3910-4eeb-845d-2d9c3c5a8d97-installation-pull-secrets\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.671292 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/9176a2f3-177d-4e8e-80d6-688c3e76ed46-tmpfs\") pod \"packageserver-d55dfcdfc-zfqzs\" (UID: \"9176a2f3-177d-4e8e-80d6-688c3e76ed46\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.671821 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/70a267ab-51a0-4f69-a9b9-8b738ab364a9-srv-cert\") pod \"catalog-operator-68c6474976-88cmt\" (UID: \"70a267ab-51a0-4f69-a9b9-8b738ab364a9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.671856 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7de297d6-6330-4fd7-b290-b564881e8139-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-gljg8\" (UID: \"7de297d6-6330-4fd7-b290-b564881e8139\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.671873 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b3ab377b-889b-40df-984f-322d42490e57-serving-cert\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.671889 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxhwt\" (UniqueName: \"kubernetes.io/projected/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-kube-api-access-rxhwt\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94"
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxhwt\" (UniqueName: \"kubernetes.io/projected/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-kube-api-access-rxhwt\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.671950 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e-proxy-tls\") pod \"machine-config-controller-84d6567774-gkzj6\" (UID: \"d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.671976 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-bound-sa-token\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672115 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-socket-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672276 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d-proxy-tls\") pod \"machine-config-operator-74547568cd-rht9z\" (UID: \"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672307 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-trusted-ca-bundle\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672354 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672382 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4a914a7e-cc73-4d59-a122-e58d5f2da33b-metrics-certs\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672406 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/9176a2f3-177d-4e8e-80d6-688c3e76ed46-apiservice-cert\") pod \"packageserver-d55dfcdfc-zfqzs\" (UID: \"9176a2f3-177d-4e8e-80d6-688c3e76ed46\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672448 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d6923b9a-6069-47eb-9513-ba0baa9d44a8-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-7xxn4\" (UID: \"d6923b9a-6069-47eb-9513-ba0baa9d44a8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672471 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d-auth-proxy-config\") pod \"machine-config-operator-74547568cd-rht9z\" (UID: \"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672496 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dcjv\" (UniqueName: \"kubernetes.io/projected/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-kube-api-access-5dcjv\") pod \"dns-default-jbqpn\" (UID: \"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb\") " pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672517 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chqvs\" (UniqueName: \"kubernetes.io/projected/70a267ab-51a0-4f69-a9b9-8b738ab364a9-kube-api-access-chqvs\") pod \"catalog-operator-68c6474976-88cmt\" (UID: \"70a267ab-51a0-4f69-a9b9-8b738ab364a9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672538 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vk5l\" (UniqueName: \"kubernetes.io/projected/e24b6dae-ea03-4141-a0ab-baf91f6b9ab8-kube-api-access-7vk5l\") pod \"dns-operator-744455d44c-zp2gq\" (UID: \"e24b6dae-ea03-4141-a0ab-baf91f6b9ab8\") " pod="openshift-dns-operator/dns-operator-744455d44c-zp2gq" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672558 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/4a914a7e-cc73-4d59-a122-e58d5f2da33b-stats-auth\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672583 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg45t\" (UniqueName: \"kubernetes.io/projected/fa3166f9-577e-4994-9290-7ced66d69dcc-kube-api-access-qg45t\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672609 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4whxq\" (UniqueName: 
\"kubernetes.io/projected/d6923b9a-6069-47eb-9513-ba0baa9d44a8-kube-api-access-4whxq\") pod \"openshift-controller-manager-operator-756b6f6bc6-7xxn4\" (UID: \"d6923b9a-6069-47eb-9513-ba0baa9d44a8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672635 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fa3166f9-577e-4994-9290-7ced66d69dcc-console-serving-cert\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: E1211 10:35:42.672662 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.17264959 +0000 UTC m=+59.991209169 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672688 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvnxb\" (UniqueName: \"kubernetes.io/projected/9226f28a-6db3-4421-9609-0d470c2f76a0-kube-api-access-pvnxb\") pod \"machine-config-server-p8pxn\" (UID: \"9226f28a-6db3-4421-9609-0d470c2f76a0\") " pod="openshift-machine-config-operator/machine-config-server-p8pxn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672711 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/01742c1a-692e-462f-a9b1-15dc72332645-metrics-tls\") pod \"ingress-operator-5b745b69d9-2b7gh\" (UID: \"01742c1a-692e-462f-a9b1-15dc72332645\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672742 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/84d62237-3910-4eeb-845d-2d9c3c5a8d97-registry-certificates\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672779 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttqrl\" (UniqueName: \"kubernetes.io/projected/06323b04-a206-4ee4-8cf3-b6a7a588e9de-kube-api-access-ttqrl\") pod \"ingress-canary-fz98p\" (UID: \"06323b04-a206-4ee4-8cf3-b6a7a588e9de\") " pod="openshift-ingress-canary/ingress-canary-fz98p" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672806 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-console-config\") pod \"console-f9d7485db-jpxgn\" 
(UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672846 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kq9n\" (UniqueName: \"kubernetes.io/projected/d8539d49-e453-4b15-a4d6-0e0583b93390-kube-api-access-6kq9n\") pod \"marketplace-operator-79b997595-kp5bk\" (UID: \"d8539d49-e453-4b15-a4d6-0e0583b93390\") " pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672893 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/5cf7d929-5a97-4045-a7bd-3e92c172c7e3-signing-cabundle\") pod \"service-ca-9c57cc56f-p6ggc\" (UID: \"5cf7d929-5a97-4045-a7bd-3e92c172c7e3\") " pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672915 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06323b04-a206-4ee4-8cf3-b6a7a588e9de-cert\") pod \"ingress-canary-fz98p\" (UID: \"06323b04-a206-4ee4-8cf3-b6a7a588e9de\") " pod="openshift-ingress-canary/ingress-canary-fz98p" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.672978 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ab98cf68-77d0-4eb5-8ce0-8c3c52906582-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skczt\" (UID: \"ab98cf68-77d0-4eb5-8ce0-8c3c52906582\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.673005 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67dd196e-3271-4222-aa21-dfaf3278eee0-secret-volume\") pod \"collect-profiles-29424150-t79rp\" (UID: \"67dd196e-3271-4222-aa21-dfaf3278eee0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.673043 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-image-import-ca\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.673340 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b3ab377b-889b-40df-984f-322d42490e57-etcd-client\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.673603 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/9226f28a-6db3-4421-9609-0d470c2f76a0-certs\") pod \"machine-config-server-p8pxn\" (UID: \"9226f28a-6db3-4421-9609-0d470c2f76a0\") " pod="openshift-machine-config-operator/machine-config-server-p8pxn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.673644 5016 
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.673666 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6923b9a-6069-47eb-9513-ba0baa9d44a8-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-7xxn4\" (UID: \"d6923b9a-6069-47eb-9513-ba0baa9d44a8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.673698 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/fae98298-ad95-4355-9f4c-0f1c159cb0f9-available-featuregates\") pod \"openshift-config-operator-7777fb866f-qldpr\" (UID: \"fae98298-ad95-4355-9f4c-0f1c159cb0f9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.673727 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/84d62237-3910-4eeb-845d-2d9c3c5a8d97-registry-certificates\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.673869 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-etcd-client\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.673951 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm4gz\" (UniqueName: \"kubernetes.io/projected/d4f98db3-0859-48bd-a024-ac6229ac9eeb-kube-api-access-fm4gz\") pod \"service-ca-operator-777779d784-cghb5\" (UID: \"d4f98db3-0859-48bd-a024-ac6229ac9eeb\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674261 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-image-import-ca\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674375 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnrvv\" (UniqueName: \"kubernetes.io/projected/01742c1a-692e-462f-a9b1-15dc72332645-kube-api-access-nnrvv\") pod \"ingress-operator-5b745b69d9-2b7gh\" (UID: \"01742c1a-692e-462f-a9b1-15dc72332645\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674408 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c9l9\" (UniqueName: \"kubernetes.io/projected/fc71fe56-8968-4acd-8ae4-50031e11e8db-kube-api-access-9c9l9\") pod \"olm-operator-6b444d44fb-vb5zj\" (UID: \"fc71fe56-8968-4acd-8ae4-50031e11e8db\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674470 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b3ab377b-889b-40df-984f-322d42490e57-etcd-ca\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674478 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-registry-tls\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674507 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwb2h\" (UniqueName: \"kubernetes.io/projected/b3ab377b-889b-40df-984f-322d42490e57-kube-api-access-dwb2h\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674546 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4a914a7e-cc73-4d59-a122-e58d5f2da33b-service-ca-bundle\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674578 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674602 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-config-volume\") pod \"dns-default-jbqpn\" (UID: \"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb\") " pod="openshift-dns/dns-default-jbqpn"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674625 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-plugins-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674628 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/84d62237-3910-4eeb-845d-2d9c3c5a8d97-installation-pull-secrets\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674689 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxz68\" (UniqueName: \"kubernetes.io/projected/9448161a-257a-46eb-b9f0-e3afac785b5d-kube-api-access-jxz68\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdl6x\" (UID: \"9448161a-257a-46eb-b9f0-e3afac785b5d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674730 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/1751c9db-e768-40b7-bc33-1b92ffa26c89-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9dqtm\" (UID: \"1751c9db-e768-40b7-bc33-1b92ffa26c89\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674758 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/4a914a7e-cc73-4d59-a122-e58d5f2da33b-default-certificate\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674789 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d-images\") pod \"machine-config-operator-74547568cd-rht9z\" (UID: \"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674821 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674920 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67dd196e-3271-4222-aa21-dfaf3278eee0-config-volume\") pod \"collect-profiles-29424150-t79rp\" (UID: \"67dd196e-3271-4222-aa21-dfaf3278eee0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.674998 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/84d62237-3910-4eeb-845d-2d9c3c5a8d97-trusted-ca\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675030 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk5ms\" (UniqueName: \"kubernetes.io/projected/4a914a7e-cc73-4d59-a122-e58d5f2da33b-kube-api-access-wk5ms\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675070 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhd6w\" (UniqueName: \"kubernetes.io/projected/b5f41ad0-ee53-4c72-b82d-64e630edd51f-kube-api-access-rhd6w\") pod \"multus-admission-controller-857f4d67dd-m62h8\" (UID: \"b5f41ad0-ee53-4c72-b82d-64e630edd51f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m62h8"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675108 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-encryption-config\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675137 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/5cf7d929-5a97-4045-a7bd-3e92c172c7e3-signing-key\") pod \"service-ca-9c57cc56f-p6ggc\" (UID: \"5cf7d929-5a97-4045-a7bd-3e92c172c7e3\") " pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675159 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/9226f28a-6db3-4421-9609-0d470c2f76a0-node-bootstrap-token\") pod \"machine-config-server-p8pxn\" (UID: \"9226f28a-6db3-4421-9609-0d470c2f76a0\") " pod="openshift-machine-config-operator/machine-config-server-p8pxn"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675208 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-serving-cert\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675234 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wwf7\" (UniqueName: \"kubernetes.io/projected/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-kube-api-access-6wwf7\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675287 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fae98298-ad95-4355-9f4c-0f1c159cb0f9-serving-cert\") pod \"openshift-config-operator-7777fb866f-qldpr\" (UID: \"fae98298-ad95-4355-9f4c-0f1c159cb0f9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675329 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-node-pullsecrets\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675357 5016 reconciler_common.go:218]
"operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-audit\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675381 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/01742c1a-692e-462f-a9b1-15dc72332645-trusted-ca\") pod \"ingress-operator-5b745b69d9-2b7gh\" (UID: \"01742c1a-692e-462f-a9b1-15dc72332645\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675432 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29jgk\" (UniqueName: \"kubernetes.io/projected/af75da0d-e4cb-4961-b57a-ea888c20af89-kube-api-access-29jgk\") pod \"control-plane-machine-set-operator-78cbb6b69f-ljcrh\" (UID: \"af75da0d-e4cb-4961-b57a-ea888c20af89\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675461 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9448161a-257a-46eb-b9f0-e3afac785b5d-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdl6x\" (UID: \"9448161a-257a-46eb-b9f0-e3afac785b5d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675484 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-949s8\" (UniqueName: \"kubernetes.io/projected/8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d-kube-api-access-949s8\") pod \"machine-config-operator-74547568cd-rht9z\" (UID: \"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675509 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/70a267ab-51a0-4f69-a9b9-8b738ab364a9-profile-collector-cert\") pod \"catalog-operator-68c6474976-88cmt\" (UID: \"70a267ab-51a0-4f69-a9b9-8b738ab364a9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675576 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9176a2f3-177d-4e8e-80d6-688c3e76ed46-webhook-cert\") pod \"packageserver-d55dfcdfc-zfqzs\" (UID: \"9176a2f3-177d-4e8e-80d6-688c3e76ed46\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675651 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-audit-dir\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675717 5016 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-gkzj6\" (UID: \"d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675807 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-service-ca\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675838 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab98cf68-77d0-4eb5-8ce0-8c3c52906582-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skczt\" (UID: \"ab98cf68-77d0-4eb5-8ce0-8c3c52906582\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675885 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fp79\" (UniqueName: \"kubernetes.io/projected/d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e-kube-api-access-8fp79\") pod \"machine-config-controller-84d6567774-gkzj6\" (UID: \"d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675911 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb596\" (UniqueName: \"kubernetes.io/projected/67dd196e-3271-4222-aa21-dfaf3278eee0-kube-api-access-sb596\") pod \"collect-profiles-29424150-t79rp\" (UID: \"67dd196e-3271-4222-aa21-dfaf3278eee0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.675961 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqx7z\" (UniqueName: \"kubernetes.io/projected/1751c9db-e768-40b7-bc33-1b92ffa26c89-kube-api-access-pqx7z\") pod \"package-server-manager-789f6589d5-9dqtm\" (UID: \"1751c9db-e768-40b7-bc33-1b92ffa26c89\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.676126 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/84d62237-3910-4eeb-845d-2d9c3c5a8d97-trusted-ca\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.676275 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.676297 5016 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhf76\" (UniqueName: \"kubernetes.io/projected/9176a2f3-177d-4e8e-80d6-688c3e76ed46-kube-api-access-vhf76\") pod \"packageserver-d55dfcdfc-zfqzs\" (UID: \"9176a2f3-177d-4e8e-80d6-688c3e76ed46\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.676356 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-mountpoint-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.676379 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-registration-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.676521 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/01742c1a-692e-462f-a9b1-15dc72332645-bound-sa-token\") pod \"ingress-operator-5b745b69d9-2b7gh\" (UID: \"01742c1a-692e-462f-a9b1-15dc72332645\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.676569 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ab98cf68-77d0-4eb5-8ce0-8c3c52906582-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skczt\" (UID: \"ab98cf68-77d0-4eb5-8ce0-8c3c52906582\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.677930 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-ready\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.677970 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-metrics-tls\") pod \"dns-default-jbqpn\" (UID: \"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb\") " pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.677989 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-config\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678007 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7de297d6-6330-4fd7-b290-b564881e8139-config\") pod 
\"kube-apiserver-operator-766d6c64bb-gljg8\" (UID: \"7de297d6-6330-4fd7-b290-b564881e8139\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678022 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxzs5\" (UniqueName: \"kubernetes.io/projected/5cf7d929-5a97-4045-a7bd-3e92c172c7e3-kube-api-access-hxzs5\") pod \"service-ca-9c57cc56f-p6ggc\" (UID: \"5cf7d929-5a97-4045-a7bd-3e92c172c7e3\") " pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678049 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4f98db3-0859-48bd-a024-ac6229ac9eeb-config\") pod \"service-ca-operator-777779d784-cghb5\" (UID: \"d4f98db3-0859-48bd-a024-ac6229ac9eeb\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678064 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e24b6dae-ea03-4141-a0ab-baf91f6b9ab8-metrics-tls\") pod \"dns-operator-744455d44c-zp2gq\" (UID: \"e24b6dae-ea03-4141-a0ab-baf91f6b9ab8\") " pod="openshift-dns-operator/dns-operator-744455d44c-zp2gq" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678081 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/af75da0d-e4cb-4961-b57a-ea888c20af89-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-ljcrh\" (UID: \"af75da0d-e4cb-4961-b57a-ea888c20af89\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678099 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-csi-data-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678120 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d8539d49-e453-4b15-a4d6-0e0583b93390-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kp5bk\" (UID: \"d8539d49-e453-4b15-a4d6-0e0583b93390\") " pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678139 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7de297d6-6330-4fd7-b290-b564881e8139-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-gljg8\" (UID: \"7de297d6-6330-4fd7-b290-b564881e8139\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678162 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/84d62237-3910-4eeb-845d-2d9c3c5a8d97-ca-trust-extracted\") pod 
\"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678194 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d8539d49-e453-4b15-a4d6-0e0583b93390-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kp5bk\" (UID: \"d8539d49-e453-4b15-a4d6-0e0583b93390\") " pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678219 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85rm7\" (UniqueName: \"kubernetes.io/projected/fae98298-ad95-4355-9f4c-0f1c159cb0f9-kube-api-access-85rm7\") pod \"openshift-config-operator-7777fb866f-qldpr\" (UID: \"fae98298-ad95-4355-9f4c-0f1c159cb0f9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678239 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/fc71fe56-8968-4acd-8ae4-50031e11e8db-srv-cert\") pod \"olm-operator-6b444d44fb-vb5zj\" (UID: \"fc71fe56-8968-4acd-8ae4-50031e11e8db\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678256 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/fc71fe56-8968-4acd-8ae4-50031e11e8db-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vb5zj\" (UID: \"fc71fe56-8968-4acd-8ae4-50031e11e8db\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678276 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-etcd-serving-ca\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678299 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b3ab377b-889b-40df-984f-322d42490e57-etcd-service-ca\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678320 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-oauth-serving-cert\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678372 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3ab377b-889b-40df-984f-322d42490e57-config\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.676664 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-audit-dir\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.677335 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-node-pullsecrets\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.677486 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-etcd-client\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678835 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-serving-cert\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.678909 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-config\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.677820 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-audit\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.677890 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.679201 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/84d62237-3910-4eeb-845d-2d9c3c5a8d97-ca-trust-extracted\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.679720 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsmb8\" (UniqueName: \"kubernetes.io/projected/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-kube-api-access-tsmb8\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 
crc kubenswrapper[5016]: I1211 10:35:42.679770 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/b5f41ad0-ee53-4c72-b82d-64e630edd51f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-m62h8\" (UID: \"b5f41ad0-ee53-4c72-b82d-64e630edd51f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m62h8" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.679829 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9448161a-257a-46eb-b9f0-e3afac785b5d-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdl6x\" (UID: \"9448161a-257a-46eb-b9f0-e3afac785b5d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.679860 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-encryption-config\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.679873 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4f98db3-0859-48bd-a024-ac6229ac9eeb-serving-cert\") pod \"service-ca-operator-777779d784-cghb5\" (UID: \"d4f98db3-0859-48bd-a024-ac6229ac9eeb\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.679962 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtsx8\" (UniqueName: \"kubernetes.io/projected/ed376fff-5d17-48b1-b48c-ec0c3548dde4-kube-api-access-wtsx8\") pod \"downloads-7954f5f757-88s4j\" (UID: \"ed376fff-5d17-48b1-b48c-ec0c3548dde4\") " pod="openshift-console/downloads-7954f5f757-88s4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.679829 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-etcd-serving-ca\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.689179 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.706885 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.727280 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.746905 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.768240 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.781698 5016 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.781866 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/9176a2f3-177d-4e8e-80d6-688c3e76ed46-tmpfs\") pod \"packageserver-d55dfcdfc-zfqzs\" (UID: \"9176a2f3-177d-4e8e-80d6-688c3e76ed46\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:42 crc kubenswrapper[5016]: E1211 10:35:42.781914 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.281866781 +0000 UTC m=+60.100426400 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782004 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7de297d6-6330-4fd7-b290-b564881e8139-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-gljg8\" (UID: \"7de297d6-6330-4fd7-b290-b564881e8139\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782047 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b3ab377b-889b-40df-984f-322d42490e57-serving-cert\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782099 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxhwt\" (UniqueName: \"kubernetes.io/projected/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-kube-api-access-rxhwt\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782126 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/70a267ab-51a0-4f69-a9b9-8b738ab364a9-srv-cert\") pod \"catalog-operator-68c6474976-88cmt\" (UID: \"70a267ab-51a0-4f69-a9b9-8b738ab364a9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782158 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e-proxy-tls\") pod \"machine-config-controller-84d6567774-gkzj6\" (UID: 
\"d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782185 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-trusted-ca-bundle\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782245 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782282 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-socket-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782287 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/9176a2f3-177d-4e8e-80d6-688c3e76ed46-tmpfs\") pod \"packageserver-d55dfcdfc-zfqzs\" (UID: \"9176a2f3-177d-4e8e-80d6-688c3e76ed46\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782304 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d-proxy-tls\") pod \"machine-config-operator-74547568cd-rht9z\" (UID: \"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782348 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4a914a7e-cc73-4d59-a122-e58d5f2da33b-metrics-certs\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782377 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d-auth-proxy-config\") pod \"machine-config-operator-74547568cd-rht9z\" (UID: \"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782394 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9176a2f3-177d-4e8e-80d6-688c3e76ed46-apiservice-cert\") pod \"packageserver-d55dfcdfc-zfqzs\" (UID: \"9176a2f3-177d-4e8e-80d6-688c3e76ed46\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782409 5016 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d6923b9a-6069-47eb-9513-ba0baa9d44a8-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-7xxn4\" (UID: \"d6923b9a-6069-47eb-9513-ba0baa9d44a8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782426 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dcjv\" (UniqueName: \"kubernetes.io/projected/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-kube-api-access-5dcjv\") pod \"dns-default-jbqpn\" (UID: \"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb\") " pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782440 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/4a914a7e-cc73-4d59-a122-e58d5f2da33b-stats-auth\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782472 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg45t\" (UniqueName: \"kubernetes.io/projected/fa3166f9-577e-4994-9290-7ced66d69dcc-kube-api-access-qg45t\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782489 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4whxq\" (UniqueName: \"kubernetes.io/projected/d6923b9a-6069-47eb-9513-ba0baa9d44a8-kube-api-access-4whxq\") pod \"openshift-controller-manager-operator-756b6f6bc6-7xxn4\" (UID: \"d6923b9a-6069-47eb-9513-ba0baa9d44a8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782506 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chqvs\" (UniqueName: \"kubernetes.io/projected/70a267ab-51a0-4f69-a9b9-8b738ab364a9-kube-api-access-chqvs\") pod \"catalog-operator-68c6474976-88cmt\" (UID: \"70a267ab-51a0-4f69-a9b9-8b738ab364a9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782522 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vk5l\" (UniqueName: \"kubernetes.io/projected/e24b6dae-ea03-4141-a0ab-baf91f6b9ab8-kube-api-access-7vk5l\") pod \"dns-operator-744455d44c-zp2gq\" (UID: \"e24b6dae-ea03-4141-a0ab-baf91f6b9ab8\") " pod="openshift-dns-operator/dns-operator-744455d44c-zp2gq" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782538 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvnxb\" (UniqueName: \"kubernetes.io/projected/9226f28a-6db3-4421-9609-0d470c2f76a0-kube-api-access-pvnxb\") pod \"machine-config-server-p8pxn\" (UID: \"9226f28a-6db3-4421-9609-0d470c2f76a0\") " pod="openshift-machine-config-operator/machine-config-server-p8pxn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782556 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/01742c1a-692e-462f-a9b1-15dc72332645-metrics-tls\") pod 
\"ingress-operator-5b745b69d9-2b7gh\" (UID: \"01742c1a-692e-462f-a9b1-15dc72332645\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782570 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fa3166f9-577e-4994-9290-7ced66d69dcc-console-serving-cert\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782592 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttqrl\" (UniqueName: \"kubernetes.io/projected/06323b04-a206-4ee4-8cf3-b6a7a588e9de-kube-api-access-ttqrl\") pod \"ingress-canary-fz98p\" (UID: \"06323b04-a206-4ee4-8cf3-b6a7a588e9de\") " pod="openshift-ingress-canary/ingress-canary-fz98p" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782608 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-console-config\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782625 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kq9n\" (UniqueName: \"kubernetes.io/projected/d8539d49-e453-4b15-a4d6-0e0583b93390-kube-api-access-6kq9n\") pod \"marketplace-operator-79b997595-kp5bk\" (UID: \"d8539d49-e453-4b15-a4d6-0e0583b93390\") " pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782629 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-socket-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782642 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67dd196e-3271-4222-aa21-dfaf3278eee0-secret-volume\") pod \"collect-profiles-29424150-t79rp\" (UID: \"67dd196e-3271-4222-aa21-dfaf3278eee0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782672 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/5cf7d929-5a97-4045-a7bd-3e92c172c7e3-signing-cabundle\") pod \"service-ca-9c57cc56f-p6ggc\" (UID: \"5cf7d929-5a97-4045-a7bd-3e92c172c7e3\") " pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782698 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06323b04-a206-4ee4-8cf3-b6a7a588e9de-cert\") pod \"ingress-canary-fz98p\" (UID: \"06323b04-a206-4ee4-8cf3-b6a7a588e9de\") " pod="openshift-ingress-canary/ingress-canary-fz98p" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782721 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/ab98cf68-77d0-4eb5-8ce0-8c3c52906582-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skczt\" (UID: \"ab98cf68-77d0-4eb5-8ce0-8c3c52906582\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782755 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b3ab377b-889b-40df-984f-322d42490e57-etcd-client\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782800 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/fae98298-ad95-4355-9f4c-0f1c159cb0f9-available-featuregates\") pod \"openshift-config-operator-7777fb866f-qldpr\" (UID: \"fae98298-ad95-4355-9f4c-0f1c159cb0f9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782823 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/9226f28a-6db3-4421-9609-0d470c2f76a0-certs\") pod \"machine-config-server-p8pxn\" (UID: \"9226f28a-6db3-4421-9609-0d470c2f76a0\") " pod="openshift-machine-config-operator/machine-config-server-p8pxn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782845 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fa3166f9-577e-4994-9290-7ced66d69dcc-console-oauth-config\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782869 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6923b9a-6069-47eb-9513-ba0baa9d44a8-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-7xxn4\" (UID: \"d6923b9a-6069-47eb-9513-ba0baa9d44a8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.782930 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm4gz\" (UniqueName: \"kubernetes.io/projected/d4f98db3-0859-48bd-a024-ac6229ac9eeb-kube-api-access-fm4gz\") pod \"service-ca-operator-777779d784-cghb5\" (UID: \"d4f98db3-0859-48bd-a024-ac6229ac9eeb\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783018 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnrvv\" (UniqueName: \"kubernetes.io/projected/01742c1a-692e-462f-a9b1-15dc72332645-kube-api-access-nnrvv\") pod \"ingress-operator-5b745b69d9-2b7gh\" (UID: \"01742c1a-692e-462f-a9b1-15dc72332645\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783073 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c9l9\" (UniqueName: \"kubernetes.io/projected/fc71fe56-8968-4acd-8ae4-50031e11e8db-kube-api-access-9c9l9\") pod \"olm-operator-6b444d44fb-vb5zj\" (UID: 
\"fc71fe56-8968-4acd-8ae4-50031e11e8db\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783097 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783140 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-config-volume\") pod \"dns-default-jbqpn\" (UID: \"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb\") " pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783164 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-plugins-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783188 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b3ab377b-889b-40df-984f-322d42490e57-etcd-ca\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783237 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwb2h\" (UniqueName: \"kubernetes.io/projected/b3ab377b-889b-40df-984f-322d42490e57-kube-api-access-dwb2h\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783262 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4a914a7e-cc73-4d59-a122-e58d5f2da33b-service-ca-bundle\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783315 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxz68\" (UniqueName: \"kubernetes.io/projected/9448161a-257a-46eb-b9f0-e3afac785b5d-kube-api-access-jxz68\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdl6x\" (UID: \"9448161a-257a-46eb-b9f0-e3afac785b5d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783354 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/1751c9db-e768-40b7-bc33-1b92ffa26c89-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9dqtm\" (UID: \"1751c9db-e768-40b7-bc33-1b92ffa26c89\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783406 5016 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/4a914a7e-cc73-4d59-a122-e58d5f2da33b-default-certificate\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783432 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d-images\") pod \"machine-config-operator-74547568cd-rht9z\" (UID: \"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783487 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783558 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67dd196e-3271-4222-aa21-dfaf3278eee0-config-volume\") pod \"collect-profiles-29424150-t79rp\" (UID: \"67dd196e-3271-4222-aa21-dfaf3278eee0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783590 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk5ms\" (UniqueName: \"kubernetes.io/projected/4a914a7e-cc73-4d59-a122-e58d5f2da33b-kube-api-access-wk5ms\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783616 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-trusted-ca-bundle\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783644 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhd6w\" (UniqueName: \"kubernetes.io/projected/b5f41ad0-ee53-4c72-b82d-64e630edd51f-kube-api-access-rhd6w\") pod \"multus-admission-controller-857f4d67dd-m62h8\" (UID: \"b5f41ad0-ee53-4c72-b82d-64e630edd51f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m62h8" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783674 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/9226f28a-6db3-4421-9609-0d470c2f76a0-node-bootstrap-token\") pod \"machine-config-server-p8pxn\" (UID: \"9226f28a-6db3-4421-9609-0d470c2f76a0\") " pod="openshift-machine-config-operator/machine-config-server-p8pxn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783732 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/5cf7d929-5a97-4045-a7bd-3e92c172c7e3-signing-key\") pod \"service-ca-9c57cc56f-p6ggc\" (UID: 
\"5cf7d929-5a97-4045-a7bd-3e92c172c7e3\") " pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783770 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wwf7\" (UniqueName: \"kubernetes.io/projected/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-kube-api-access-6wwf7\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783863 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fae98298-ad95-4355-9f4c-0f1c159cb0f9-serving-cert\") pod \"openshift-config-operator-7777fb866f-qldpr\" (UID: \"fae98298-ad95-4355-9f4c-0f1c159cb0f9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.783931 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/01742c1a-692e-462f-a9b1-15dc72332645-trusted-ca\") pod \"ingress-operator-5b745b69d9-2b7gh\" (UID: \"01742c1a-692e-462f-a9b1-15dc72332645\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784000 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29jgk\" (UniqueName: \"kubernetes.io/projected/af75da0d-e4cb-4961-b57a-ea888c20af89-kube-api-access-29jgk\") pod \"control-plane-machine-set-operator-78cbb6b69f-ljcrh\" (UID: \"af75da0d-e4cb-4961-b57a-ea888c20af89\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784023 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9448161a-257a-46eb-b9f0-e3afac785b5d-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdl6x\" (UID: \"9448161a-257a-46eb-b9f0-e3afac785b5d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784083 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-949s8\" (UniqueName: \"kubernetes.io/projected/8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d-kube-api-access-949s8\") pod \"machine-config-operator-74547568cd-rht9z\" (UID: \"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784108 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/70a267ab-51a0-4f69-a9b9-8b738ab364a9-profile-collector-cert\") pod \"catalog-operator-68c6474976-88cmt\" (UID: \"70a267ab-51a0-4f69-a9b9-8b738ab364a9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784171 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9176a2f3-177d-4e8e-80d6-688c3e76ed46-webhook-cert\") pod \"packageserver-d55dfcdfc-zfqzs\" (UID: \"9176a2f3-177d-4e8e-80d6-688c3e76ed46\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784201 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-gkzj6\" (UID: \"d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784233 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b3ab377b-889b-40df-984f-322d42490e57-etcd-ca\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784232 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d-auth-proxy-config\") pod \"machine-config-operator-74547568cd-rht9z\" (UID: \"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784240 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-service-ca\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784308 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784312 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab98cf68-77d0-4eb5-8ce0-8c3c52906582-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skczt\" (UID: \"ab98cf68-77d0-4eb5-8ce0-8c3c52906582\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784411 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqx7z\" (UniqueName: \"kubernetes.io/projected/1751c9db-e768-40b7-bc33-1b92ffa26c89-kube-api-access-pqx7z\") pod \"package-server-manager-789f6589d5-9dqtm\" (UID: \"1751c9db-e768-40b7-bc33-1b92ffa26c89\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784474 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fp79\" (UniqueName: \"kubernetes.io/projected/d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e-kube-api-access-8fp79\") pod \"machine-config-controller-84d6567774-gkzj6\" (UID: \"d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784495 5016 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb596\" (UniqueName: \"kubernetes.io/projected/67dd196e-3271-4222-aa21-dfaf3278eee0-kube-api-access-sb596\") pod \"collect-profiles-29424150-t79rp\" (UID: \"67dd196e-3271-4222-aa21-dfaf3278eee0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784542 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhf76\" (UniqueName: \"kubernetes.io/projected/9176a2f3-177d-4e8e-80d6-688c3e76ed46-kube-api-access-vhf76\") pod \"packageserver-d55dfcdfc-zfqzs\" (UID: \"9176a2f3-177d-4e8e-80d6-688c3e76ed46\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784563 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-registration-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784576 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-plugins-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784580 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/01742c1a-692e-462f-a9b1-15dc72332645-bound-sa-token\") pod \"ingress-operator-5b745b69d9-2b7gh\" (UID: \"01742c1a-692e-462f-a9b1-15dc72332645\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784630 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-mountpoint-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784701 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ab98cf68-77d0-4eb5-8ce0-8c3c52906582-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skczt\" (UID: \"ab98cf68-77d0-4eb5-8ce0-8c3c52906582\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784731 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-ready\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784757 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-metrics-tls\") pod \"dns-default-jbqpn\" (UID: \"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb\") " 
pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784781 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7de297d6-6330-4fd7-b290-b564881e8139-config\") pod \"kube-apiserver-operator-766d6c64bb-gljg8\" (UID: \"7de297d6-6330-4fd7-b290-b564881e8139\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784807 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxzs5\" (UniqueName: \"kubernetes.io/projected/5cf7d929-5a97-4045-a7bd-3e92c172c7e3-kube-api-access-hxzs5\") pod \"service-ca-9c57cc56f-p6ggc\" (UID: \"5cf7d929-5a97-4045-a7bd-3e92c172c7e3\") " pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784828 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4f98db3-0859-48bd-a024-ac6229ac9eeb-config\") pod \"service-ca-operator-777779d784-cghb5\" (UID: \"d4f98db3-0859-48bd-a024-ac6229ac9eeb\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784849 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e24b6dae-ea03-4141-a0ab-baf91f6b9ab8-metrics-tls\") pod \"dns-operator-744455d44c-zp2gq\" (UID: \"e24b6dae-ea03-4141-a0ab-baf91f6b9ab8\") " pod="openshift-dns-operator/dns-operator-744455d44c-zp2gq" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784871 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7de297d6-6330-4fd7-b290-b564881e8139-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-gljg8\" (UID: \"7de297d6-6330-4fd7-b290-b564881e8139\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784896 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/af75da0d-e4cb-4961-b57a-ea888c20af89-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-ljcrh\" (UID: \"af75da0d-e4cb-4961-b57a-ea888c20af89\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784920 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-csi-data-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784959 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d8539d49-e453-4b15-a4d6-0e0583b93390-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kp5bk\" (UID: \"d8539d49-e453-4b15-a4d6-0e0583b93390\") " pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.784986 5016 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/fc71fe56-8968-4acd-8ae4-50031e11e8db-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vb5zj\" (UID: \"fc71fe56-8968-4acd-8ae4-50031e11e8db\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.785010 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d8539d49-e453-4b15-a4d6-0e0583b93390-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kp5bk\" (UID: \"d8539d49-e453-4b15-a4d6-0e0583b93390\") " pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.785039 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-registration-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.785055 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-service-ca\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.785049 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85rm7\" (UniqueName: \"kubernetes.io/projected/fae98298-ad95-4355-9f4c-0f1c159cb0f9-kube-api-access-85rm7\") pod \"openshift-config-operator-7777fb866f-qldpr\" (UID: \"fae98298-ad95-4355-9f4c-0f1c159cb0f9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.785146 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/fc71fe56-8968-4acd-8ae4-50031e11e8db-srv-cert\") pod \"olm-operator-6b444d44fb-vb5zj\" (UID: \"fc71fe56-8968-4acd-8ae4-50031e11e8db\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.785234 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b3ab377b-889b-40df-984f-322d42490e57-etcd-service-ca\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.785263 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-oauth-serving-cert\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.785320 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3ab377b-889b-40df-984f-322d42490e57-config\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.785388 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/b5f41ad0-ee53-4c72-b82d-64e630edd51f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-m62h8\" (UID: \"b5f41ad0-ee53-4c72-b82d-64e630edd51f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m62h8"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.785421 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9448161a-257a-46eb-b9f0-e3afac785b5d-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdl6x\" (UID: \"9448161a-257a-46eb-b9f0-e3afac785b5d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x"
Dec 11 10:35:42 crc kubenswrapper[5016]: E1211 10:35:42.785504 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.28548923 +0000 UTC m=+60.104048809 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.785515 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-ready\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.787248 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e-proxy-tls\") pod \"machine-config-controller-84d6567774-gkzj6\" (UID: \"d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.787391 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4a914a7e-cc73-4d59-a122-e58d5f2da33b-metrics-certs\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.787428 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3ab377b-889b-40df-984f-322d42490e57-config\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.787655 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-oauth-serving-cert\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.787724 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67dd196e-3271-4222-aa21-dfaf3278eee0-secret-volume\") pod \"collect-profiles-29424150-t79rp\" (UID: \"67dd196e-3271-4222-aa21-dfaf3278eee0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.787795 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/01742c1a-692e-462f-a9b1-15dc72332645-trusted-ca\") pod \"ingress-operator-5b745b69d9-2b7gh\" (UID: \"01742c1a-692e-462f-a9b1-15dc72332645\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.788170 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-gkzj6\" (UID: \"d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.785154 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-mountpoint-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.788261 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-csi-data-dir\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.788279 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-console-config\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.788319 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4f98db3-0859-48bd-a024-ac6229ac9eeb-serving-cert\") pod \"service-ca-operator-777779d784-cghb5\" (UID: \"d4f98db3-0859-48bd-a024-ac6229ac9eeb\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.788335 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9448161a-257a-46eb-b9f0-e3afac785b5d-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdl6x\" (UID: \"9448161a-257a-46eb-b9f0-e3afac785b5d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x"
\"kubernetes.io/projected/ed376fff-5d17-48b1-b48c-ec0c3548dde4-kube-api-access-wtsx8\") pod \"downloads-7954f5f757-88s4j\" (UID: \"ed376fff-5d17-48b1-b48c-ec0c3548dde4\") " pod="openshift-console/downloads-7954f5f757-88s4j" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.788476 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9448161a-257a-46eb-b9f0-e3afac785b5d-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdl6x\" (UID: \"9448161a-257a-46eb-b9f0-e3afac785b5d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.788493 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/fae98298-ad95-4355-9f4c-0f1c159cb0f9-available-featuregates\") pod \"openshift-config-operator-7777fb866f-qldpr\" (UID: \"fae98298-ad95-4355-9f4c-0f1c159cb0f9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.788696 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.788714 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fa3166f9-577e-4994-9290-7ced66d69dcc-console-serving-cert\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.789234 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6923b9a-6069-47eb-9513-ba0baa9d44a8-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-7xxn4\" (UID: \"d6923b9a-6069-47eb-9513-ba0baa9d44a8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.789417 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/70a267ab-51a0-4f69-a9b9-8b738ab364a9-profile-collector-cert\") pod \"catalog-operator-68c6474976-88cmt\" (UID: \"70a267ab-51a0-4f69-a9b9-8b738ab364a9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.789667 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fae98298-ad95-4355-9f4c-0f1c159cb0f9-serving-cert\") pod \"openshift-config-operator-7777fb866f-qldpr\" (UID: \"fae98298-ad95-4355-9f4c-0f1c159cb0f9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.789684 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/01742c1a-692e-462f-a9b1-15dc72332645-metrics-tls\") pod \"ingress-operator-5b745b69d9-2b7gh\" (UID: \"01742c1a-692e-462f-a9b1-15dc72332645\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.789697 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/e24b6dae-ea03-4141-a0ab-baf91f6b9ab8-metrics-tls\") pod \"dns-operator-744455d44c-zp2gq\" (UID: \"e24b6dae-ea03-4141-a0ab-baf91f6b9ab8\") " pod="openshift-dns-operator/dns-operator-744455d44c-zp2gq" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.790088 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b3ab377b-889b-40df-984f-322d42490e57-etcd-service-ca\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.790256 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d6923b9a-6069-47eb-9513-ba0baa9d44a8-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-7xxn4\" (UID: \"d6923b9a-6069-47eb-9513-ba0baa9d44a8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.790503 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b3ab377b-889b-40df-984f-322d42490e57-serving-cert\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.790821 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/4a914a7e-cc73-4d59-a122-e58d5f2da33b-default-certificate\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.791024 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/fc71fe56-8968-4acd-8ae4-50031e11e8db-srv-cert\") pod \"olm-operator-6b444d44fb-vb5zj\" (UID: \"fc71fe56-8968-4acd-8ae4-50031e11e8db\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.791488 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/4a914a7e-cc73-4d59-a122-e58d5f2da33b-stats-auth\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.792042 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/fc71fe56-8968-4acd-8ae4-50031e11e8db-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vb5zj\" (UID: \"fc71fe56-8968-4acd-8ae4-50031e11e8db\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.792609 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fa3166f9-577e-4994-9290-7ced66d69dcc-console-oauth-config\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.792780 5016 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b3ab377b-889b-40df-984f-322d42490e57-etcd-client\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.795059 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4a914a7e-cc73-4d59-a122-e58d5f2da33b-service-ca-bundle\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.808116 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.817999 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9176a2f3-177d-4e8e-80d6-688c3e76ed46-apiservice-cert\") pod \"packageserver-d55dfcdfc-zfqzs\" (UID: \"9176a2f3-177d-4e8e-80d6-688c3e76ed46\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.819735 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9176a2f3-177d-4e8e-80d6-688c3e76ed46-webhook-cert\") pod \"packageserver-d55dfcdfc-zfqzs\" (UID: \"9176a2f3-177d-4e8e-80d6-688c3e76ed46\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.826500 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.835110 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab98cf68-77d0-4eb5-8ce0-8c3c52906582-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skczt\" (UID: \"ab98cf68-77d0-4eb5-8ce0-8c3c52906582\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.847157 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.867454 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.877089 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ab98cf68-77d0-4eb5-8ce0-8c3c52906582-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skczt\" (UID: \"ab98cf68-77d0-4eb5-8ce0-8c3c52906582\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.884616 5016 request.go:700] Waited for 1.015728332s due to client-side throttling, not priority and fairness, request: 
GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.886247 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.889695 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:42 crc kubenswrapper[5016]: E1211 10:35:42.889792 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.389774519 +0000 UTC m=+60.208334098 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.890224 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:42 crc kubenswrapper[5016]: E1211 10:35:42.890636 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.390613119 +0000 UTC m=+60.209172698 (durationBeforeRetry 500ms). 
Dec 11 10:35:42 crc kubenswrapper[5016]: E1211 10:35:42.890636 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.390613119 +0000 UTC m=+60.209172698 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.907818 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.926573 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.931485 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/af75da0d-e4cb-4961-b57a-ea888c20af89-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-ljcrh\" (UID: \"af75da0d-e4cb-4961-b57a-ea888c20af89\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.947029 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.967312 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.987466 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.991626 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:42 crc kubenswrapper[5016]: E1211 10:35:42.992265 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.492245393 +0000 UTC m=+60.310804972 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:42 crc kubenswrapper[5016]: I1211 10:35:42.995507 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/5cf7d929-5a97-4045-a7bd-3e92c172c7e3-signing-cabundle\") pod \"service-ca-9c57cc56f-p6ggc\" (UID: \"5cf7d929-5a97-4045-a7bd-3e92c172c7e3\") " pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.006619 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.026387 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.031077 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/5cf7d929-5a97-4045-a7bd-3e92c172c7e3-signing-key\") pod \"service-ca-9c57cc56f-p6ggc\" (UID: \"5cf7d929-5a97-4045-a7bd-3e92c172c7e3\") " pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.046603 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.066620 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.071077 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/b5f41ad0-ee53-4c72-b82d-64e630edd51f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-m62h8\" (UID: \"b5f41ad0-ee53-4c72-b82d-64e630edd51f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m62h8"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.087510 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.093686 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.094071 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.59405771 +0000 UTC m=+60.412617289 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.094847 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d-images\") pod \"machine-config-operator-74547568cd-rht9z\" (UID: \"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.106963 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.127779 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.136056 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d-proxy-tls\") pod \"machine-config-operator-74547568cd-rht9z\" (UID: \"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.147578 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.161576 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/1751c9db-e768-40b7-bc33-1b92ffa26c89-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9dqtm\" (UID: \"1751c9db-e768-40b7-bc33-1b92ffa26c89\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.167072 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.187205 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.192421 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d8539d49-e453-4b15-a4d6-0e0583b93390-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kp5bk\" (UID: \"d8539d49-e453-4b15-a4d6-0e0583b93390\") " pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.194473 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.194579 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.694558606 +0000 UTC m=+60.513118195 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.195036 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.195532 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.6955118 +0000 UTC m=+60.514071389 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.206833 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.227742 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.247065 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.267655 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.275547 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7de297d6-6330-4fd7-b290-b564881e8139-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-gljg8\" (UID: \"7de297d6-6330-4fd7-b290-b564881e8139\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.287742 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.296003 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.296164 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.796143299 +0000 UTC m=+60.614702878 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.296390 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.296828 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.796812145 +0000 UTC m=+60.615371724 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.307394 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.333969 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.340110 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d8539d49-e453-4b15-a4d6-0e0583b93390-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kp5bk\" (UID: \"d8539d49-e453-4b15-a4d6-0e0583b93390\") " pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.346954 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.357880 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7de297d6-6330-4fd7-b290-b564881e8139-config\") pod \"kube-apiserver-operator-766d6c64bb-gljg8\" (UID: \"7de297d6-6330-4fd7-b290-b564881e8139\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.367427 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.371007 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4f98db3-0859-48bd-a024-ac6229ac9eeb-serving-cert\") pod \"service-ca-operator-777779d784-cghb5\" (UID: \"d4f98db3-0859-48bd-a024-ac6229ac9eeb\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.387058 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.397977 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.398137 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.898116792 +0000 UTC m=+60.716676371 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.398211 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.398587 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.898579253 +0000 UTC m=+60.717138832 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.406842 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.416371 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4f98db3-0859-48bd-a024-ac6229ac9eeb-config\") pod \"service-ca-operator-777779d784-cghb5\" (UID: \"d4f98db3-0859-48bd-a024-ac6229ac9eeb\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.434920 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.445782 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/70a267ab-51a0-4f69-a9b9-8b738ab364a9-srv-cert\") pod \"catalog-operator-68c6474976-88cmt\" (UID: \"70a267ab-51a0-4f69-a9b9-8b738ab364a9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.448535 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.467164 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.473522 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.487525 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.499294 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.499454 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:43.999433557 +0000 UTC m=+60.817993136 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.499513 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.507583 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.517208 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67dd196e-3271-4222-aa21-dfaf3278eee0-config-volume\") pod \"collect-profiles-29424150-t79rp\" (UID: \"67dd196e-3271-4222-aa21-dfaf3278eee0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.526853 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.547569 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.551142 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/9226f28a-6db3-4421-9609-0d470c2f76a0-node-bootstrap-token\") pod \"machine-config-server-p8pxn\" (UID: \"9226f28a-6db3-4421-9609-0d470c2f76a0\") " pod="openshift-machine-config-operator/machine-config-server-p8pxn" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.567398 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.572060 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/9226f28a-6db3-4421-9609-0d470c2f76a0-certs\") pod \"machine-config-server-p8pxn\" (UID: \"9226f28a-6db3-4421-9609-0d470c2f76a0\") " pod="openshift-machine-config-operator/machine-config-server-p8pxn" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.600637 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.600776 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.100760833 +0000 UTC m=+60.919320412 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.601192 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.601580 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.101572853 +0000 UTC m=+60.920132432 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.620021 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5jxm\" (UniqueName: \"kubernetes.io/projected/43e0986a-2fa8-4410-9a6d-1499f5840491-kube-api-access-c5jxm\") pod \"cluster-image-registry-operator-dc59b4c8b-zcxnk\" (UID: \"43e0986a-2fa8-4410-9a6d-1499f5840491\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.702541 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.702719 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.202696464 +0000 UTC m=+61.021256043 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.703216 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.703506 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.203499214 +0000 UTC m=+61.022058793 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.761003 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/43e0986a-2fa8-4410-9a6d-1499f5840491-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-zcxnk\" (UID: \"43e0986a-2fa8-4410-9a6d-1499f5840491\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.783684 5016 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.783780 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/06323b04-a206-4ee4-8cf3-b6a7a588e9de-cert podName:06323b04-a206-4ee4-8cf3-b6a7a588e9de nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.283757701 +0000 UTC m=+61.102317290 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/06323b04-a206-4ee4-8cf3-b6a7a588e9de-cert") pod "ingress-canary-fz98p" (UID: "06323b04-a206-4ee4-8cf3-b6a7a588e9de") : failed to sync secret cache: timed out waiting for the condition Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.784890 5016 configmap.go:193] Couldn't get configMap openshift-dns/dns-default: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.784924 5016 configmap.go:193] Couldn't get configMap openshift-multus/cni-sysctl-allowlist: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.784965 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-config-volume podName:a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.28495023 +0000 UTC m=+61.103509809 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-config-volume") pod "dns-default-jbqpn" (UID: "a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.785040 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-cni-sysctl-allowlist podName:ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.285023122 +0000 UTC m=+61.103582701 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cni-sysctl-allowlist" (UniqueName: "kubernetes.io/configmap/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-cni-sysctl-allowlist") pod "cni-sysctl-allowlist-ds-tfj94" (UID: "ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.786149 5016 secret.go:188] Couldn't get secret openshift-dns/dns-default-metrics-tls: failed to sync secret cache: timed out waiting for the condition Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.786215 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-metrics-tls podName:a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.286203291 +0000 UTC m=+61.104762940 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-metrics-tls") pod "dns-default-jbqpn" (UID: "a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb") : failed to sync secret cache: timed out waiting for the condition Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.804138 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.804343 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.304314768 +0000 UTC m=+61.122874347 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.804823 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.805281 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.30526506 +0000 UTC m=+61.123824749 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.867190 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.884730 5016 request.go:700] Waited for 1.849724766s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns/secrets?fieldSelector=metadata.name%3Ddns-default-metrics-tls&limit=500&resourceVersion=0 Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.886815 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.905854 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.906034 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.406008422 +0000 UTC m=+61.224568001 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.906331 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:43 crc kubenswrapper[5016]: E1211 10:35:43.906710 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.40669614 +0000 UTC m=+61.225255719 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.907366 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.927781 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.948003 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.968097 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 11 10:35:43 crc kubenswrapper[5016]: I1211 10:35:43.987578 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.006980 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.007291 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.007439 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.507414821 +0000 UTC m=+61.325974400 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.007856 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.008263 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-12-11 10:35:44.508255551 +0000 UTC m=+61.326815130 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.028053 5016 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.046482 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.066972 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-sysctl-allowlist" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.087178 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.106282 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.108708 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.109070 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.609045604 +0000 UTC m=+61.427605193 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.109391 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.109736 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.609719721 +0000 UTC m=+61.428279300 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.126576 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.147385 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.166897 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.186887 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.211421 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.211617 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.711589431 +0000 UTC m=+61.530149020 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.212138 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.212482 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.712469942 +0000 UTC m=+61.531029591 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.241514 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzprh\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-kube-api-access-jzprh\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.262093 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-bound-sa-token\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.307270 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxhwt\" (UniqueName: \"kubernetes.io/projected/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-kube-api-access-rxhwt\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.313646 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.313809 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.813785118 +0000 UTC m=+61.632344697 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.313862 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06323b04-a206-4ee4-8cf3-b6a7a588e9de-cert\") pod \"ingress-canary-fz98p\" (UID: \"06323b04-a206-4ee4-8cf3-b6a7a588e9de\") " pod="openshift-ingress-canary/ingress-canary-fz98p" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.313980 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.314000 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-config-volume\") pod \"dns-default-jbqpn\" (UID: \"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb\") " pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.314190 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-metrics-tls\") pod \"dns-default-jbqpn\" (UID: \"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb\") " pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.314337 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.314688 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.81468096 +0000 UTC m=+61.633240529 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.314717 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-tfj94\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") " pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.315493 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-config-volume\") pod \"dns-default-jbqpn\" (UID: \"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb\") " pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.319135 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-metrics-tls\") pod \"dns-default-jbqpn\" (UID: \"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb\") " pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.319775 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06323b04-a206-4ee4-8cf3-b6a7a588e9de-cert\") pod \"ingress-canary-fz98p\" (UID: \"06323b04-a206-4ee4-8cf3-b6a7a588e9de\") " pod="openshift-ingress-canary/ingress-canary-fz98p" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.345027 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm4gz\" (UniqueName: \"kubernetes.io/projected/d4f98db3-0859-48bd-a024-ac6229ac9eeb-kube-api-access-fm4gz\") pod \"service-ca-operator-777779d784-cghb5\" (UID: \"d4f98db3-0859-48bd-a024-ac6229ac9eeb\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.381731 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dcjv\" (UniqueName: \"kubernetes.io/projected/a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb-kube-api-access-5dcjv\") pod \"dns-default-jbqpn\" (UID: \"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb\") " pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.394875 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.403502 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvnxb\" (UniqueName: \"kubernetes.io/projected/9226f28a-6db3-4421-9609-0d470c2f76a0-kube-api-access-pvnxb\") pod \"machine-config-server-p8pxn\" (UID: \"9226f28a-6db3-4421-9609-0d470c2f76a0\") " pod="openshift-machine-config-operator/machine-config-server-p8pxn" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.415060 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.415185 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.915160475 +0000 UTC m=+61.733720054 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.415468 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.415789 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:44.91578112 +0000 UTC m=+61.734340699 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.422883 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhd6w\" (UniqueName: \"kubernetes.io/projected/b5f41ad0-ee53-4c72-b82d-64e630edd51f-kube-api-access-rhd6w\") pod \"multus-admission-controller-857f4d67dd-m62h8\" (UID: \"b5f41ad0-ee53-4c72-b82d-64e630edd51f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m62h8" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.439862 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.518579 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.518734 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.018709946 +0000 UTC m=+61.837269525 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.518774 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.519371 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.019359671 +0000 UTC m=+61.837919250 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.540483 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" event={"ID":"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9","Type":"ContainerStarted","Data":"a5bee6a25f73ff086fd99099925587b483b187bd396f811a6a79d5f09729b5b2"} Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.543758 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/01742c1a-692e-462f-a9b1-15dc72332645-bound-sa-token\") pod \"ingress-operator-5b745b69d9-2b7gh\" (UID: \"01742c1a-692e-462f-a9b1-15dc72332645\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.590323 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jbqpn"] Dec 11 10:35:44 crc kubenswrapper[5016]: W1211 10:35:44.598301 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda14b3ab0_d279_46c5_a9d3_cb4d60fba5bb.slice/crio-1c6232b9c90489b6e97b105bd118d5428db731b878fd5fe4de1c378efd0b1119 WatchSource:0}: Error finding container 1c6232b9c90489b6e97b105bd118d5428db731b878fd5fe4de1c378efd0b1119: Status 404 returned error can't find the container with id 1c6232b9c90489b6e97b105bd118d5428db731b878fd5fe4de1c378efd0b1119 Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.600643 5016 projected.go:288] Couldn't get configMap openshift-cluster-machine-approver/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.604444 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fp79\" (UniqueName: \"kubernetes.io/projected/d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e-kube-api-access-8fp79\") pod \"machine-config-controller-84d6567774-gkzj6\" (UID: \"d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.621501 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.621637 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.12161376 +0000 UTC m=+61.940173339 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.621919 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.622194 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.122182665 +0000 UTC m=+61.940742244 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.644472 5016 projected.go:288] Couldn't get configMap openshift-authentication/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.659014 5016 projected.go:288] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.662576 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-949s8\" (UniqueName: \"kubernetes.io/projected/8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d-kube-api-access-949s8\") pod \"machine-config-operator-74547568cd-rht9z\" (UID: \"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.678668 5016 projected.go:288] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.685893 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-p8pxn" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.697903 5016 projected.go:288] Couldn't get configMap openshift-kube-storage-version-migrator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: W1211 10:35:44.699398 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9226f28a_6db3_4421_9609_0d470c2f76a0.slice/crio-4455315fcd425f751d7fac96a0d09034f5491897392429ed9c2de920717181f3 WatchSource:0}: Error finding container 4455315fcd425f751d7fac96a0d09034f5491897392429ed9c2de920717181f3: Status 404 returned error can't find the container with id 4455315fcd425f751d7fac96a0d09034f5491897392429ed9c2de920717181f3 Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.717699 5016 projected.go:288] Couldn't get configMap openshift-console-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.722634 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.723076 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.22306311 +0000 UTC m=+62.041622679 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.737524 5016 projected.go:288] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.741315 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttqrl\" (UniqueName: \"kubernetes.io/projected/06323b04-a206-4ee4-8cf3-b6a7a588e9de-kube-api-access-ttqrl\") pod \"ingress-canary-fz98p\" (UID: \"06323b04-a206-4ee4-8cf3-b6a7a588e9de\") " pod="openshift-ingress-canary/ingress-canary-fz98p" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.775276 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.783125 5016 projected.go:288] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.802504 5016 projected.go:288] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.802934 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wwf7\" (UniqueName: \"kubernetes.io/projected/eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31-kube-api-access-6wwf7\") pod \"csi-hostpathplugin-sg9hf\" (UID: \"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31\") " pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.819212 5016 projected.go:288] Couldn't get configMap openshift-route-controller-manager/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.823659 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.824085 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.324070298 +0000 UTC m=+62.142629867 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.838047 5016 projected.go:288] Couldn't get configMap openshift-kube-controller-manager-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.838081 5016 projected.go:194] Error preparing data for projected volume kube-api-access for pod openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.838141 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b-kube-api-access podName:5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.338123514 +0000 UTC m=+62.156683093 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b-kube-api-access") pod "kube-controller-manager-operator-78b949d7b-8rnb2" (UID: "5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.859166 5016 projected.go:288] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.867203 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.884766 5016 request.go:700] Waited for 1.240700223s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&resourceVersion=27074 Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.887700 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.907467 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.924561 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.924781 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-11 10:35:45.424740288 +0000 UTC m=+62.243299867 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.925367 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:44 crc kubenswrapper[5016]: E1211 10:35:44.926034 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.42601451 +0000 UTC m=+62.244574089 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.926925 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.948090 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.966515 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 11 10:35:44 crc kubenswrapper[5016]: I1211 10:35:44.991446 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.006904 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-fz98p" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.008366 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.011355 5016 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" secret="" err="failed to sync secret cache: timed out waiting for the condition" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.011400 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.026370 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.026957 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.526926515 +0000 UTC m=+62.345486084 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.027217 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.030339 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.046974 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.067781 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.089345 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.108196 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.127731 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.128085 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.628068297 +0000 UTC m=+62.446627876 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.147712 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.166620 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.187116 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.188622 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.207616 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.227437 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.228593 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.228685 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.728665655 +0000 UTC m=+62.547225234 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.229311 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.229768 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-12-11 10:35:45.729752532 +0000 UTC m=+62.548312111 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.242557 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk"] Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.247858 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: W1211 10:35:45.259480 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43e0986a_2fa8_4410_9a6d_1499f5840491.slice/crio-616332dbedf3248fb3a4c174cdb51e13de2d83851c905ed73b91c4c81713870b WatchSource:0}: Error finding container 616332dbedf3248fb3a4c174cdb51e13de2d83851c905ed73b91c4c81713870b: Status 404 returned error can't find the container with id 616332dbedf3248fb3a4c174cdb51e13de2d83851c905ed73b91c4c81713870b Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.267911 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.280518 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-fz98p"] Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.287297 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.293136 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-m62h8" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.307196 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.315171 5016 projected.go:194] Error preparing data for projected volume kube-api-access-8pdh8 for pod openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.315233 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/73e450c1-7bc9-4502-b3c5-e7845ba29342-kube-api-access-8pdh8 podName:73e450c1-7bc9-4502-b3c5-e7845ba29342 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.815215287 +0000 UTC m=+62.633774866 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-8pdh8" (UniqueName: "kubernetes.io/projected/73e450c1-7bc9-4502-b3c5-e7845ba29342-kube-api-access-8pdh8") pod "machine-approver-56656f9798-7tv87" (UID: "73e450c1-7bc9-4502-b3c5-e7845ba29342") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.318438 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-sg9hf"] Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.327035 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.331746 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.331971 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.831948559 +0000 UTC m=+62.650508138 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.332249 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.332553 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.832543754 +0000 UTC m=+62.651103333 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.347404 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.362081 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ab98cf68-77d0-4eb5-8ce0-8c3c52906582-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skczt\" (UID: \"ab98cf68-77d0-4eb5-8ce0-8c3c52906582\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" Dec 11 10:35:45 crc kubenswrapper[5016]: W1211 10:35:45.363243 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeceb77a8_82c2_4c74_a0f3_1f6e19a5fa31.slice/crio-a7a5fd2303f3d4defb5d61475b5b024e571087cabca223a73c762ac5e59c9820 WatchSource:0}: Error finding container a7a5fd2303f3d4defb5d61475b5b024e571087cabca223a73c762ac5e59c9820: Status 404 returned error can't find the container with id a7a5fd2303f3d4defb5d61475b5b024e571087cabca223a73c762ac5e59c9820 Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.368015 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.374662 5016 projected.go:194] Error preparing data for projected volume kube-api-access-tzb6q for pod openshift-authentication/oauth-openshift-558db77b4-4k8l5: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.374735 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a91554fe-759f-4f9a-9d88-7b4d8650a08b-kube-api-access-tzb6q podName:a91554fe-759f-4f9a-9d88-7b4d8650a08b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.874717892 +0000 UTC m=+62.693277461 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-tzb6q" (UniqueName: "kubernetes.io/projected/a91554fe-759f-4f9a-9d88-7b4d8650a08b-kube-api-access-tzb6q") pod "oauth-openshift-558db77b4-4k8l5" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.387168 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.389711 5016 projected.go:194] Error preparing data for projected volume kube-api-access-gnpvq for pod openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.392404 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/86f2da10-45a8-4cc4-9100-3e909d78274f-kube-api-access-gnpvq podName:86f2da10-45a8-4cc4-9100-3e909d78274f nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.889797514 +0000 UTC m=+62.708357093 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-gnpvq" (UniqueName: "kubernetes.io/projected/86f2da10-45a8-4cc4-9100-3e909d78274f-kube-api-access-gnpvq") pod "cluster-samples-operator-665b6dd947-58t4g" (UID: "86f2da10-45a8-4cc4-9100-3e909d78274f") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.407101 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-cghb5"] Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.407609 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.409892 5016 projected.go:194] Error preparing data for projected volume kube-api-access-sws26 for pod openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.409972 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/974b6a63-5953-4683-8909-20b4a93856b1-kube-api-access-sws26 podName:974b6a63-5953-4683-8909-20b4a93856b1 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.909953661 +0000 UTC m=+62.728513240 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-sws26" (UniqueName: "kubernetes.io/projected/974b6a63-5953-4683-8909-20b4a93856b1-kube-api-access-sws26") pod "apiserver-7bbb656c7d-v75dd" (UID: "974b6a63-5953-4683-8909-20b4a93856b1") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.426910 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: W1211 10:35:45.427490 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4f98db3_0859_48bd_a024_ac6229ac9eeb.slice/crio-b2bb75f51bef4b44578dc06ef1effe092ef34951954aa9613bfc5f4154850503 WatchSource:0}: Error finding container b2bb75f51bef4b44578dc06ef1effe092ef34951954aa9613bfc5f4154850503: Status 404 returned error can't find the container with id b2bb75f51bef4b44578dc06ef1effe092ef34951954aa9613bfc5f4154850503 Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.432975 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.433200 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8rnb2\" (UID: \"5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.433310 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.933288455 +0000 UTC m=+62.751848034 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.433467 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.433798 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.933786628 +0000 UTC m=+62.752346207 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.442640 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8rnb2\" (UID: \"5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.448868 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.449602 5016 projected.go:194] Error preparing data for projected volume kube-api-access-qnkxq for pod openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.449697 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1cc10407-5264-4bb5-8223-3ea9a4551c29-kube-api-access-qnkxq podName:1cc10407-5264-4bb5-8223-3ea9a4551c29 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.949678229 +0000 UTC m=+62.768237808 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-qnkxq" (UniqueName: "kubernetes.io/projected/1cc10407-5264-4bb5-8223-3ea9a4551c29-kube-api-access-qnkxq") pod "migrator-59844c95c7-c5sb2" (UID: "1cc10407-5264-4bb5-8223-3ea9a4551c29") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.466808 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.489549 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.498190 5016 projected.go:194] Error preparing data for projected volume kube-api-access-xkp4t for pod openshift-console-operator/console-operator-58897d9998-7rm8d: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.498305 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d20858ea-54b5-474f-bdd9-40eb83d42e57-kube-api-access-xkp4t podName:d20858ea-54b5-474f-bdd9-40eb83d42e57 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:45.998280357 +0000 UTC m=+62.816839936 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-xkp4t" (UniqueName: "kubernetes.io/projected/d20858ea-54b5-474f-bdd9-40eb83d42e57-kube-api-access-xkp4t") pod "console-operator-58897d9998-7rm8d" (UID: "d20858ea-54b5-474f-bdd9-40eb83d42e57") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.506531 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.507774 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-m62h8"] Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.527184 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.528073 5016 projected.go:194] Error preparing data for projected volume kube-api-access-7r7qz for pod openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.528155 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c12d008c-11de-489c-9553-175a76cbfef8-kube-api-access-7r7qz podName:c12d008c-11de-489c-9553-175a76cbfef8 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.028137062 +0000 UTC m=+62.846696641 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-7r7qz" (UniqueName: "kubernetes.io/projected/c12d008c-11de-489c-9553-175a76cbfef8-kube-api-access-7r7qz") pod "openshift-apiserver-operator-796bbdcf4f-tb4rm" (UID: "c12d008c-11de-489c-9553-175a76cbfef8") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.534831 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.535221 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.035208326 +0000 UTC m=+62.853767895 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.544713 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5" event={"ID":"d4f98db3-0859-48bd-a024-ac6229ac9eeb","Type":"ContainerStarted","Data":"b2bb75f51bef4b44578dc06ef1effe092ef34951954aa9613bfc5f4154850503"} Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.545494 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-fz98p" event={"ID":"06323b04-a206-4ee4-8cf3-b6a7a588e9de","Type":"ContainerStarted","Data":"6f3d79d2498008f8f6368a9a3fa43730b9b88aed9ecfee09ee84876a981d5a32"} Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.546431 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.546645 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jbqpn" event={"ID":"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb","Type":"ContainerStarted","Data":"8da3d83e1650680b90befd48b5bb74367b8032acf5518f555a3af2c3a676a24d"} Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.546667 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jbqpn" event={"ID":"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb","Type":"ContainerStarted","Data":"1c6232b9c90489b6e97b105bd118d5428db731b878fd5fe4de1c378efd0b1119"} Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.547525 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-p8pxn" event={"ID":"9226f28a-6db3-4421-9609-0d470c2f76a0","Type":"ContainerStarted","Data":"8880e29d106b558b08dbef859c2d7c2cfc95fa9a2a3957983b1dbc4133daeafc"} Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.547550 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-p8pxn" event={"ID":"9226f28a-6db3-4421-9609-0d470c2f76a0","Type":"ContainerStarted","Data":"4455315fcd425f751d7fac96a0d09034f5491897392429ed9c2de920717181f3"} Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.548183 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-m62h8" event={"ID":"b5f41ad0-ee53-4c72-b82d-64e630edd51f","Type":"ContainerStarted","Data":"0b436e9771e4197003dfc6ac07329be5acda71a21be87a87378cf56ad3e0a4f8"} Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.549048 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" event={"ID":"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31","Type":"ContainerStarted","Data":"a7a5fd2303f3d4defb5d61475b5b024e571087cabca223a73c762ac5e59c9820"} Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.549884 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" 
event={"ID":"43e0986a-2fa8-4410-9a6d-1499f5840491","Type":"ContainerStarted","Data":"616332dbedf3248fb3a4c174cdb51e13de2d83851c905ed73b91c4c81713870b"} Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.550909 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" event={"ID":"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9","Type":"ContainerStarted","Data":"42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c"} Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.551139 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.564800 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.566508 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.573516 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29jgk\" (UniqueName: \"kubernetes.io/projected/af75da0d-e4cb-4961-b57a-ea888c20af89-kube-api-access-29jgk\") pod \"control-plane-machine-set-operator-78cbb6b69f-ljcrh\" (UID: \"af75da0d-e4cb-4961-b57a-ea888c20af89\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.573584 5016 projected.go:194] Error preparing data for projected volume kube-api-access-pq95k for pod openshift-machine-api/machine-api-operator-5694c8668f-tn6f4: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.573634 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cb94a68f-794d-4e0f-9a65-aff1b885d021-kube-api-access-pq95k podName:cb94a68f-794d-4e0f-9a65-aff1b885d021 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.073616132 +0000 UTC m=+62.892175711 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-pq95k" (UniqueName: "kubernetes.io/projected/cb94a68f-794d-4e0f-9a65-aff1b885d021-kube-api-access-pq95k") pod "machine-api-operator-5694c8668f-tn6f4" (UID: "cb94a68f-794d-4e0f-9a65-aff1b885d021") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.587269 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.593355 5016 projected.go:194] Error preparing data for projected volume kube-api-access-pj4jp for pod openshift-authentication-operator/authentication-operator-69f744f599-b485d: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.593422 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/98219d38-61a5-425b-8281-3d0b72e10c77-kube-api-access-pj4jp podName:98219d38-61a5-425b-8281-3d0b72e10c77 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.09340264 +0000 UTC m=+62.911962219 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-pj4jp" (UniqueName: "kubernetes.io/projected/98219d38-61a5-425b-8281-3d0b72e10c77-kube-api-access-pj4jp") pod "authentication-operator-69f744f599-b485d" (UID: "98219d38-61a5-425b-8281-3d0b72e10c77") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.608188 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.622390 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7de297d6-6330-4fd7-b290-b564881e8139-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-gljg8\" (UID: \"7de297d6-6330-4fd7-b290-b564881e8139\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.630283 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.636980 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.637607 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.137589288 +0000 UTC m=+62.956148867 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.640181 5016 projected.go:194] Error preparing data for projected volume kube-api-access-dqr5k for pod openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.640307 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/771549fe-a108-4fe9-a461-043432468961-kube-api-access-dqr5k podName:771549fe-a108-4fe9-a461-043432468961 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.140282875 +0000 UTC m=+62.958842504 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-dqr5k" (UniqueName: "kubernetes.io/projected/771549fe-a108-4fe9-a461-043432468961-kube-api-access-dqr5k") pod "route-controller-manager-6576b87f9c-w2qk9" (UID: "771549fe-a108-4fe9-a461-043432468961") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.650179 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.667120 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.669650 5016 projected.go:194] Error preparing data for projected volume kube-api-access-qzmvt for pod openshift-controller-manager/controller-manager-879f6c89f-xdpcj: failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.669789 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-kube-api-access-qzmvt podName:b1573c39-dbf1-475d-90d8-2bc8d89f18c6 nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.16973329 +0000 UTC m=+62.988292869 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-qzmvt" (UniqueName: "kubernetes.io/projected/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-kube-api-access-qzmvt") pod "controller-manager-879f6c89f-xdpcj" (UID: "b1573c39-dbf1-475d-90d8-2bc8d89f18c6") : failed to sync configmap cache: timed out waiting for the condition Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.687287 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.689858 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.707291 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.715210 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.742098 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.742771 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.242749279 +0000 UTC m=+63.061308888 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.743004 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.744065 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.243962059 +0000 UTC m=+63.062521638 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.749815 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.763870 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsmb8\" (UniqueName: \"kubernetes.io/projected/c22ea3a4-ad05-4dde-9cc8-0a0365d225a6-kube-api-access-tsmb8\") pod \"apiserver-76f77b778f-hbw4j\" (UID: \"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6\") " pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.768140 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.776562 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqx7z\" (UniqueName: \"kubernetes.io/projected/1751c9db-e768-40b7-bc33-1b92ffa26c89-kube-api-access-pqx7z\") pod \"package-server-manager-789f6589d5-9dqtm\" (UID: \"1751c9db-e768-40b7-bc33-1b92ffa26c89\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.776854 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhf76\" (UniqueName: \"kubernetes.io/projected/9176a2f3-177d-4e8e-80d6-688c3e76ed46-kube-api-access-vhf76\") pod \"packageserver-d55dfcdfc-zfqzs\" (UID: \"9176a2f3-177d-4e8e-80d6-688c3e76ed46\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.777705 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c9l9\" (UniqueName: 
\"kubernetes.io/projected/fc71fe56-8968-4acd-8ae4-50031e11e8db-kube-api-access-9c9l9\") pod \"olm-operator-6b444d44fb-vb5zj\" (UID: \"fc71fe56-8968-4acd-8ae4-50031e11e8db\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.779423 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb596\" (UniqueName: \"kubernetes.io/projected/67dd196e-3271-4222-aa21-dfaf3278eee0-kube-api-access-sb596\") pod \"collect-profiles-29424150-t79rp\" (UID: \"67dd196e-3271-4222-aa21-dfaf3278eee0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.780067 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chqvs\" (UniqueName: \"kubernetes.io/projected/70a267ab-51a0-4f69-a9b9-8b738ab364a9-kube-api-access-chqvs\") pod \"catalog-operator-68c6474976-88cmt\" (UID: \"70a267ab-51a0-4f69-a9b9-8b738ab364a9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.787545 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.794965 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vk5l\" (UniqueName: \"kubernetes.io/projected/e24b6dae-ea03-4141-a0ab-baf91f6b9ab8-kube-api-access-7vk5l\") pod \"dns-operator-744455d44c-zp2gq\" (UID: \"e24b6dae-ea03-4141-a0ab-baf91f6b9ab8\") " pod="openshift-dns-operator/dns-operator-744455d44c-zp2gq" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.807534 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.815569 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwb2h\" (UniqueName: \"kubernetes.io/projected/b3ab377b-889b-40df-984f-322d42490e57-kube-api-access-dwb2h\") pod \"etcd-operator-b45778765-rtj4v\" (UID: \"b3ab377b-889b-40df-984f-322d42490e57\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.829177 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.835520 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnrvv\" (UniqueName: \"kubernetes.io/projected/01742c1a-692e-462f-a9b1-15dc72332645-kube-api-access-nnrvv\") pod \"ingress-operator-5b745b69d9-2b7gh\" (UID: \"01742c1a-692e-462f-a9b1-15dc72332645\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.843776 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.844252 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.344230719 +0000 UTC m=+63.162790298 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.844320 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pdh8\" (UniqueName: \"kubernetes.io/projected/73e450c1-7bc9-4502-b3c5-e7845ba29342-kube-api-access-8pdh8\") pod \"machine-approver-56656f9798-7tv87\" (UID: \"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.847328 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.847387 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pdh8\" (UniqueName: \"kubernetes.io/projected/73e450c1-7bc9-4502-b3c5-e7845ba29342-kube-api-access-8pdh8\") pod \"machine-approver-56656f9798-7tv87\" (UID: \"73e450c1-7bc9-4502-b3c5-e7845ba29342\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.854144 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk5ms\" (UniqueName: \"kubernetes.io/projected/4a914a7e-cc73-4d59-a122-e58d5f2da33b-kube-api-access-wk5ms\") pod \"router-default-5444994796-8f46b\" (UID: \"4a914a7e-cc73-4d59-a122-e58d5f2da33b\") " pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.866519 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.868053 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.880453 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85rm7\" (UniqueName: \"kubernetes.io/projected/fae98298-ad95-4355-9f4c-0f1c159cb0f9-kube-api-access-85rm7\") pod \"openshift-config-operator-7777fb866f-qldpr\" (UID: \"fae98298-ad95-4355-9f4c-0f1c159cb0f9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.888136 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.897568 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxzs5\" (UniqueName: \"kubernetes.io/projected/5cf7d929-5a97-4045-a7bd-3e92c172c7e3-kube-api-access-hxzs5\") pod \"service-ca-9c57cc56f-p6ggc\" (UID: \"5cf7d929-5a97-4045-a7bd-3e92c172c7e3\") " pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.906602 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.926473 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.929663 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4whxq\" (UniqueName: \"kubernetes.io/projected/d6923b9a-6069-47eb-9513-ba0baa9d44a8-kube-api-access-4whxq\") pod \"openshift-controller-manager-operator-756b6f6bc6-7xxn4\" (UID: \"d6923b9a-6069-47eb-9513-ba0baa9d44a8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.937519 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.949274 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.949330 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnpvq\" (UniqueName: \"kubernetes.io/projected/86f2da10-45a8-4cc4-9100-3e909d78274f-kube-api-access-gnpvq\") pod \"cluster-samples-operator-665b6dd947-58t4g\" (UID: \"86f2da10-45a8-4cc4-9100-3e909d78274f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.949349 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sws26\" (UniqueName: \"kubernetes.io/projected/974b6a63-5953-4683-8909-20b4a93856b1-kube-api-access-sws26\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.949377 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzb6q\" (UniqueName: \"kubernetes.io/projected/a91554fe-759f-4f9a-9d88-7b4d8650a08b-kube-api-access-tzb6q\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:45 crc kubenswrapper[5016]: E1211 10:35:45.950022 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.450004914 +0000 UTC m=+63.268564493 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.953227 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.955137 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnpvq\" (UniqueName: \"kubernetes.io/projected/86f2da10-45a8-4cc4-9100-3e909d78274f-kube-api-access-gnpvq\") pod \"cluster-samples-operator-665b6dd947-58t4g\" (UID: \"86f2da10-45a8-4cc4-9100-3e909d78274f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.956078 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sws26\" (UniqueName: \"kubernetes.io/projected/974b6a63-5953-4683-8909-20b4a93856b1-kube-api-access-sws26\") pod \"apiserver-7bbb656c7d-v75dd\" (UID: \"974b6a63-5953-4683-8909-20b4a93856b1\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.958093 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzb6q\" (UniqueName: \"kubernetes.io/projected/a91554fe-759f-4f9a-9d88-7b4d8650a08b-kube-api-access-tzb6q\") pod \"oauth-openshift-558db77b4-4k8l5\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.963343 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxz68\" (UniqueName: \"kubernetes.io/projected/9448161a-257a-46eb-b9f0-e3afac785b5d-kube-api-access-jxz68\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdl6x\" (UID: \"9448161a-257a-46eb-b9f0-e3afac785b5d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.975624 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.978752 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6"] Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.985140 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kq9n\" (UniqueName: \"kubernetes.io/projected/d8539d49-e453-4b15-a4d6-0e0583b93390-kube-api-access-6kq9n\") pod \"marketplace-operator-79b997595-kp5bk\" (UID: \"d8539d49-e453-4b15-a4d6-0e0583b93390\") " pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.988609 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z"] Dec 11 10:35:45 crc kubenswrapper[5016]: I1211 10:35:45.990001 5016 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-console"/"openshift-service-ca.crt" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.008086 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.009479 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtsx8\" (UniqueName: \"kubernetes.io/projected/ed376fff-5d17-48b1-b48c-ec0c3548dde4-kube-api-access-wtsx8\") pod \"downloads-7954f5f757-88s4j\" (UID: \"ed376fff-5d17-48b1-b48c-ec0c3548dde4\") " pod="openshift-console/downloads-7954f5f757-88s4j" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.019328 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.020241 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qg45t\" (UniqueName: \"kubernetes.io/projected/fa3166f9-577e-4994-9290-7ced66d69dcc-kube-api-access-qg45t\") pod \"console-f9d7485db-jpxgn\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.048986 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.049656 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:46 crc kubenswrapper[5016]: E1211 10:35:46.049846 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.54972822 +0000 UTC m=+63.368287799 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.049905 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.049978 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkp4t\" (UniqueName: \"kubernetes.io/projected/d20858ea-54b5-474f-bdd9-40eb83d42e57-kube-api-access-xkp4t\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.050017 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnkxq\" (UniqueName: \"kubernetes.io/projected/1cc10407-5264-4bb5-8223-3ea9a4551c29-kube-api-access-qnkxq\") pod \"migrator-59844c95c7-c5sb2\" (UID: \"1cc10407-5264-4bb5-8223-3ea9a4551c29\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.050142 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7r7qz\" (UniqueName: \"kubernetes.io/projected/c12d008c-11de-489c-9553-175a76cbfef8-kube-api-access-7r7qz\") pod \"openshift-apiserver-operator-796bbdcf4f-tb4rm\" (UID: \"c12d008c-11de-489c-9553-175a76cbfef8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" Dec 11 10:35:46 crc kubenswrapper[5016]: E1211 10:35:46.050238 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.550223633 +0000 UTC m=+63.368783212 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.054254 5016 util.go:30] "No sandbox for pod can be found. 
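[editor's note] The repeating MountVolume.MountDevice / UnmountVolume.TearDown failures above share one root cause: kubelet is asked to operate on a kubevirt.io.hostpath-provisioner CSI volume before that driver has registered with the kubelet (its plugin pod, hostpath-provisioner/csi-hostpathplugin-sg9hf, only reports ContainerStarted further down in this log). Rather than spinning, kubelet parks each failed operation and refuses to retry it until a backoff window expires; that is what "No retries permitted until ... (durationBeforeRetry 500ms)" records. Below is a minimal Go sketch of that gating. It is an illustration only: the opTracker/pendingOp names are hypothetical, and kubelet's real logic (nestedpendingoperations.go, with growing backoff) is more involved.

package main

import (
	"errors"
	"fmt"
	"time"
)

// pendingOp remembers when an operation last failed and how long to wait
// before permitting another attempt.
type pendingOp struct {
	lastError time.Time
	backoff   time.Duration // logged above as "durationBeforeRetry 500ms"
}

// opTracker gates operations by key, mirroring the
// "{volumeName:... podName:... nodeName:}" keys in the log entries.
type opTracker struct {
	ops map[string]*pendingOp
}

func (t *opTracker) tryRun(key string, now time.Time, run func() error) error {
	if op, seen := t.ops[key]; seen && now.Before(op.lastError.Add(op.backoff)) {
		// Corresponds to the "No retries permitted until <lastError+backoff>" errors above.
		return fmt.Errorf("no retries permitted until %s", op.lastError.Add(op.backoff).Format(time.RFC3339Nano))
	}
	if err := run(); err != nil {
		op, seen := t.ops[key]
		if !seen {
			op = &pendingOp{backoff: 500 * time.Millisecond}
			t.ops[key] = op
		}
		op.lastError = now
		return err
	}
	delete(t.ops, key) // success clears the pending operation
	return nil
}

func main() {
	t := &opTracker{ops: map[string]*pendingOp{}}
	key := "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
	mount := func() error {
		return errors.New("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers")
	}
	fmt.Println(t.tryRun(key, time.Now(), mount))                  // fails and arms the 500ms backoff
	fmt.Println(t.tryRun(key, time.Now(), mount))                  // gated: still inside the backoff window
	fmt.Println(t.tryRun(key, time.Now().Add(time.Second), mount)) // window elapsed, retried (fails again here)
}

Once the driver does register, the next permitted retry can succeed and the key is dropped, which is why this pattern is self-healing during startup.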
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.056397 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkp4t\" (UniqueName: \"kubernetes.io/projected/d20858ea-54b5-474f-bdd9-40eb83d42e57-kube-api-access-xkp4t\") pod \"console-operator-58897d9998-7rm8d\" (UID: \"d20858ea-54b5-474f-bdd9-40eb83d42e57\") " pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.056745 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7r7qz\" (UniqueName: \"kubernetes.io/projected/c12d008c-11de-489c-9553-175a76cbfef8-kube-api-access-7r7qz\") pod \"openshift-apiserver-operator-796bbdcf4f-tb4rm\" (UID: \"c12d008c-11de-489c-9553-175a76cbfef8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.063435 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnkxq\" (UniqueName: \"kubernetes.io/projected/1cc10407-5264-4bb5-8223-3ea9a4551c29-kube-api-access-qnkxq\") pod \"migrator-59844c95c7-c5sb2\" (UID: \"1cc10407-5264-4bb5-8223-3ea9a4551c29\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.067508 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.074804 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.074866 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.075100 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.078062 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.087072 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.088269 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.107214 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.125986 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.129844 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.130129 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.147096 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.151589 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.151820 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.152024 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pq95k\" (UniqueName: \"kubernetes.io/projected/cb94a68f-794d-4e0f-9a65-aff1b885d021-kube-api-access-pq95k\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: \"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.152109 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pj4jp\" (UniqueName: \"kubernetes.io/projected/98219d38-61a5-425b-8281-3d0b72e10c77-kube-api-access-pj4jp\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.152146 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqr5k\" (UniqueName: \"kubernetes.io/projected/771549fe-a108-4fe9-a461-043432468961-kube-api-access-dqr5k\") pod \"route-controller-manager-6576b87f9c-w2qk9\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:46 crc kubenswrapper[5016]: E1211 10:35:46.154214 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.654188504 +0000 UTC m=+63.472748083 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.161741 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pq95k\" (UniqueName: \"kubernetes.io/projected/cb94a68f-794d-4e0f-9a65-aff1b885d021-kube-api-access-pq95k\") pod \"machine-api-operator-5694c8668f-tn6f4\" (UID: \"cb94a68f-794d-4e0f-9a65-aff1b885d021\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.164545 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pj4jp\" (UniqueName: \"kubernetes.io/projected/98219d38-61a5-425b-8281-3d0b72e10c77-kube-api-access-pj4jp\") pod \"authentication-operator-69f744f599-b485d\" (UID: \"98219d38-61a5-425b-8281-3d0b72e10c77\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.164716 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqr5k\" (UniqueName: \"kubernetes.io/projected/771549fe-a108-4fe9-a461-043432468961-kube-api-access-dqr5k\") pod \"route-controller-manager-6576b87f9c-w2qk9\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.168439 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.168681 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.188896 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.203522 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-zp2gq" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.210792 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.221403 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.246705 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp"] Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.256852 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzmvt\" (UniqueName: \"kubernetes.io/projected/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-kube-api-access-qzmvt\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.257208 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:46 crc kubenswrapper[5016]: E1211 10:35:46.258421 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.758405091 +0000 UTC m=+63.576964670 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.259969 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.270078 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.277240 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.287646 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.287810 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.294502 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.308930 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.315484 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.322439 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzmvt\" (UniqueName: \"kubernetes.io/projected/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-kube-api-access-qzmvt\") pod \"controller-manager-879f6c89f-xdpcj\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.332483 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.346068 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.349054 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.369448 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.370371 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:46 crc kubenswrapper[5016]: E1211 10:35:46.370518 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.870495303 +0000 UTC m=+63.689054892 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.370670 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:46 crc kubenswrapper[5016]: E1211 10:35:46.371058 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.871048846 +0000 UTC m=+63.689608425 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.373931 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.378480 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-88s4j" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.391499 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.392970 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.414722 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.416348 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.433225 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.437342 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.447081 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.451355 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.466456 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.466593 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2"] Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.475706 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.476910 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:46 crc kubenswrapper[5016]: E1211 10:35:46.477368 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:46.977353415 +0000 UTC m=+63.795912994 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.487975 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.502486 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.510349 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.517665 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.556890 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.561090 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.580770 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:46 crc kubenswrapper[5016]: E1211 10:35:46.581209 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:47.081193923 +0000 UTC m=+63.899753502 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.582033 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.589097 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.591317 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.593115 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.649094 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" event={"ID":"d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e","Type":"ContainerStarted","Data":"26b553a900789dfc85c719a641399f9debd82dda6418feed326fd6b478d028d0"} Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.649410 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" event={"ID":"d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e","Type":"ContainerStarted","Data":"2b447f7c9d0ef2b21ad64dfe0affb822388ae1da9b4dbed2f2dbba67ea0fd39d"} Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.682157 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" event={"ID":"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31","Type":"ContainerStarted","Data":"3c622b842606779ec8bd2a8b469d3192ad137d586efd17bb59ae699616d76f6d"} Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.682614 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:46 crc kubenswrapper[5016]: E1211 10:35:46.684612 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:47.18457299 +0000 UTC m=+64.003132579 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.687876 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" event={"ID":"67dd196e-3271-4222-aa21-dfaf3278eee0","Type":"ContainerStarted","Data":"5358e8083144177c466c2c9f1f78848b5305a31168b3c6498ee73d9426bbd12b"} Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.693551 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5" event={"ID":"d4f98db3-0859-48bd-a024-ac6229ac9eeb","Type":"ContainerStarted","Data":"444789220f1f01e99b4d1bdb4cda12846e317c5e944bf60181e6a87f913388ee"} Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.695854 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jbqpn" event={"ID":"a14b3ab0-d279-46c5-a9d3-cb4d60fba5bb","Type":"ContainerStarted","Data":"e922b34b2d7d848843cb46636b5944fa9fab7cdfc76b36acbba7aa9962ed62d1"} Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.696335 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.705154 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-m62h8" event={"ID":"b5f41ad0-ee53-4c72-b82d-64e630edd51f","Type":"ContainerStarted","Data":"72386b86d4e5882ac932e816fe77018cd18b670cfaac4b54ca4657849451f863"} Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.707229 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-m62h8" event={"ID":"b5f41ad0-ee53-4c72-b82d-64e630edd51f","Type":"ContainerStarted","Data":"98ef8f47f2c584889832657b6daf648ef035f6e8c22e4974eb6cdc5d1ba744f5"} Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.707263 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" event={"ID":"43e0986a-2fa8-4410-9a6d-1499f5840491","Type":"ContainerStarted","Data":"137c44d20bcd4b4bb3469fbd379b62bb66921b937c4199e51e2658f1e61d0f8c"} Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.712509 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj"] Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.715822 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs"] Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.736879 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-fz98p" event={"ID":"06323b04-a206-4ee4-8cf3-b6a7a588e9de","Type":"ContainerStarted","Data":"2b44bf9808228fc32300d97fbd7f49290dc9c9c33d1a759a6d0e43a66fc4656d"} Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.742018 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt"] Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.764375 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" event={"ID":"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d","Type":"ContainerStarted","Data":"fa774e97b350aa9b7cf2212df9c60ad91256ef2be34710d939c903453c969fd2"} Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.764462 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" event={"ID":"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d","Type":"ContainerStarted","Data":"d4a5d86aa645fe2761133272a950ab8923d0f096cbc8506220e362337124e1bb"} Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.791999 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:46 crc kubenswrapper[5016]: E1211 10:35:46.792393 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:47.292366365 +0000 UTC m=+64.110925944 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.793596 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh"] Dec 11 10:35:46 crc kubenswrapper[5016]: W1211 10:35:46.854798 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e0a05be_0c8d_45fe_9f42_dfeae7f6f72b.slice/crio-c0dec9063c57a334a02a2bc04f421350f661ab17e5e7bd9ed08bb43068c72a94 WatchSource:0}: Error finding container c0dec9063c57a334a02a2bc04f421350f661ab17e5e7bd9ed08bb43068c72a94: Status 404 returned error can't find the container with id c0dec9063c57a334a02a2bc04f421350f661ab17e5e7bd9ed08bb43068c72a94 Dec 11 10:35:46 crc kubenswrapper[5016]: W1211 10:35:46.875387 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab98cf68_77d0_4eb5_8ce0_8c3c52906582.slice/crio-fb8b5a48b7cbed4587622a03d3d5896165f5a678e3ad3aa4f3113d20136d749c WatchSource:0}: Error finding container fb8b5a48b7cbed4587622a03d3d5896165f5a678e3ad3aa4f3113d20136d749c: Status 404 returned error can't find the container with id fb8b5a48b7cbed4587622a03d3d5896165f5a678e3ad3aa4f3113d20136d749c Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.894056 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:46 crc kubenswrapper[5016]: E1211 10:35:46.894450 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:47.394434969 +0000 UTC m=+64.212994548 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:46 crc kubenswrapper[5016]: W1211 10:35:46.900202 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfc71fe56_8968_4acd_8ae4_50031e11e8db.slice/crio-b37b914999810be8e39bab3a4ea3ec65aa7a2949fd7ba086f9c7f0e207725f07 WatchSource:0}: Error finding container b37b914999810be8e39bab3a4ea3ec65aa7a2949fd7ba086f9c7f0e207725f07: Status 404 returned error can't find the container with id b37b914999810be8e39bab3a4ea3ec65aa7a2949fd7ba086f9c7f0e207725f07 Dec 11 10:35:46 crc kubenswrapper[5016]: W1211 10:35:46.904449 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf75da0d_e4cb_4961_b57a_ea888c20af89.slice/crio-3cbb1a050b27e2f08dc3466dcf0820c46c6d62a7b4692a566341cefdbdcad0e7 WatchSource:0}: Error finding container 3cbb1a050b27e2f08dc3466dcf0820c46c6d62a7b4692a566341cefdbdcad0e7: Status 404 returned error can't find the container with id 3cbb1a050b27e2f08dc3466dcf0820c46c6d62a7b4692a566341cefdbdcad0e7 Dec 11 10:35:46 crc kubenswrapper[5016]: I1211 10:35:46.958901 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt"] Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.002961 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:47 crc kubenswrapper[5016]: E1211 10:35:47.003299 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:47.503288 +0000 UTC m=+64.321847579 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.053208 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd"] Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.072330 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh"] Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.103531 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:47 crc kubenswrapper[5016]: E1211 10:35:47.103813 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:47.603798206 +0000 UTC m=+64.422357785 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:47 crc kubenswrapper[5016]: W1211 10:35:47.165363 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a914a7e_cc73_4d59_a122_e58d5f2da33b.slice/crio-05dde996c256709eb5368f06bf1ed16908d27a9d591c07001de3beee6f300be5 WatchSource:0}: Error finding container 05dde996c256709eb5368f06bf1ed16908d27a9d591c07001de3beee6f300be5: Status 404 returned error can't find the container with id 05dde996c256709eb5368f06bf1ed16908d27a9d591c07001de3beee6f300be5 Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.204677 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:47 crc kubenswrapper[5016]: E1211 10:35:47.205056 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:47.70501814 +0000 UTC m=+64.523577719 (durationBeforeRetry 500ms). 
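[editor's note] The W1211 manager.go:1169 "Failed to process watch event ... Status 404 ... can't find the container" warnings above are watch events racing container creation: the cgroup path shows up in the watch stream before the corresponding container is queryable, which is expected during a burst of pod starts like this one and typically resolves on its own. Below is a sketch of the general defensive pattern of treating not-found as transient and re-probing briefly; lookupWithRetry and errNotFound are hypothetical stand-ins, not the actual API involved here.

package main

import (
	"errors"
	"fmt"
	"time"
)

// errNotFound stands in for the runtime's "Status 404 ... can't find the
// container" response seen in the warnings above.
var errNotFound = errors.New("status 404: can't find the container")

// lookupWithRetry treats not-found as transient: the watch event may simply
// have outrun container creation, so probe again a few times before giving up.
func lookupWithRetry(lookup func(id string) error, id string, attempts int, wait time.Duration) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = lookup(id); !errors.Is(err, errNotFound) {
			return err // success, or a non-transient error worth surfacing
		}
		time.Sleep(wait)
	}
	return err // still missing after all attempts; report the 404
}

func main() {
	calls := 0
	lookup := func(id string) error {
		calls++
		if calls < 3 {
			return errNotFound // first probes race the container's creation
		}
		return nil
	}
	fmt.Println(lookupWithRetry(lookup, "c0dec9063c57a334a02a2bc04f421350f661ab17e5e7bd9ed08bb43068c72a94", 5, 10*time.Millisecond))
}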
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.275638 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-zp2gq"]
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.307407 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:47 crc kubenswrapper[5016]: E1211 10:35:47.307867 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:47.807845993 +0000 UTC m=+64.626405572 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.408773 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:47 crc kubenswrapper[5016]: E1211 10:35:47.409543 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:47.909529258 +0000 UTC m=+64.728088837 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.422376 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-p8pxn" podStartSLOduration=6.422357424 podStartE2EDuration="6.422357424s" podCreationTimestamp="2025-12-11 10:35:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:47.421115343 +0000 UTC m=+64.239674932" watchObservedRunningTime="2025-12-11 10:35:47.422357424 +0000 UTC m=+64.240917023"
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.518839 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:47 crc kubenswrapper[5016]: E1211 10:35:47.519298 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:48.019280191 +0000 UTC m=+64.837839770 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.525589 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cghb5" podStartSLOduration=37.525567687 podStartE2EDuration="37.525567687s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:47.511600813 +0000 UTC m=+64.330160402" watchObservedRunningTime="2025-12-11 10:35:47.525567687 +0000 UTC m=+64.344127266"
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.621469 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:47 crc kubenswrapper[5016]: E1211 10:35:47.622123 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:48.122111364 +0000 UTC m=+64.940670943 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.722857 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:47 crc kubenswrapper[5016]: E1211 10:35:47.723141 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:48.223123053 +0000 UTC m=+65.041682632 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
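[editor's note] The pod_startup_latency_tracker.go entries above log, per pod, its creation time, its image-pull window, and when it was first observed running. A plausible reading of the numbers (hedged, since this reconstructs the arithmetic rather than quoting kubelet source): podStartE2EDuration is the running time minus podCreationTimestamp, while podStartSLOduration additionally excludes time spent pulling images; here firstStartedPulling/lastFinishedPulling are the zero time, so the two durations coincide. A small Go sketch with a hypothetical startupDurations helper:

package main

import (
	"fmt"
	"time"
)

// startupDurations reconstructs the two durations logged above: end-to-end
// latency since the pod object was created, and an SLO latency that excludes
// any image-pull window.
func startupDurations(created, firstPull, lastPull, running time.Time) (e2e, slo time.Duration) {
	e2e = running.Sub(created)
	slo = e2e
	if !firstPull.IsZero() && !lastPull.IsZero() {
		slo -= lastPull.Sub(firstPull) // pulls happened: exclude them from the SLO figure
	}
	return e2e, slo
}

func main() {
	// Timestamps for machine-config-server-p8pxn from the entry above, using
	// the watch-observed running time; the zero time for the pull fields
	// means no image pull was needed.
	created, _ := time.Parse(time.RFC3339, "2025-12-11T10:35:41Z")
	running, _ := time.Parse(time.RFC3339Nano, "2025-12-11T10:35:47.422357424Z")
	e2e, slo := startupDurations(created, time.Time{}, time.Time{}, running)
	fmt.Println(e2e, slo) // 6.422357424s 6.422357424s, matching the logged values
}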
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.723740 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:47 crc kubenswrapper[5016]: E1211 10:35:47.733263 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:48.233243812 +0000 UTC m=+65.051803391 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.750993 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-qldpr"]
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.765542 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hbw4j"]
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.779301 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" podStartSLOduration=5.779281217 podStartE2EDuration="5.779281217s" podCreationTimestamp="2025-12-11 10:35:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:47.775461192 +0000 UTC m=+64.594020791" watchObservedRunningTime="2025-12-11 10:35:47.779281217 +0000 UTC m=+64.597840796"
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.803429 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" event={"ID":"70a267ab-51a0-4f69-a9b9-8b738ab364a9","Type":"ContainerStarted","Data":"f0bc65ff176c1c6b955607f7bfecfab89c0584b0318e17ee3baf5a395c9e967a"}
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.809290 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-zp2gq" event={"ID":"e24b6dae-ea03-4141-a0ab-baf91f6b9ab8","Type":"ContainerStarted","Data":"94c03384bcf85307c92215f7f453203afa4e74d3b559fbe8e75a353ae8bc4e6b"}
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.817363 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" event={"ID":"9176a2f3-177d-4e8e-80d6-688c3e76ed46","Type":"ContainerStarted","Data":"2ec827be5d14823400a2abf281b9cfc884ea0c33b6329e2e11be23a7d32a75f9"}
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.828910 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-fz98p" podStartSLOduration=6.828896108 podStartE2EDuration="6.828896108s" podCreationTimestamp="2025-12-11 10:35:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:47.828290814 +0000 UTC m=+64.646850403" watchObservedRunningTime="2025-12-11 10:35:47.828896108 +0000 UTC m=+64.647455687"
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.829333 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:47 crc kubenswrapper[5016]: E1211 10:35:47.829686 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:48.329671498 +0000 UTC m=+65.148231077 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.840788 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" event={"ID":"974b6a63-5953-4683-8909-20b4a93856b1","Type":"ContainerStarted","Data":"fc0452c5bdba6995271e668709191864678ff60b9b03a20bb36795f3c6d64817"}
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.847174 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" event={"ID":"73e450c1-7bc9-4502-b3c5-e7845ba29342","Type":"ContainerStarted","Data":"2f4fd5b44d6a2543336cb31c4c0f794f6f827e5e49ed92b75855d8853d46bf11"}
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.871435 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-8f46b" event={"ID":"4a914a7e-cc73-4d59-a122-e58d5f2da33b","Type":"ContainerStarted","Data":"05dde996c256709eb5368f06bf1ed16908d27a9d591c07001de3beee6f300be5"}
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.876968 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" event={"ID":"fc71fe56-8968-4acd-8ae4-50031e11e8db","Type":"ContainerStarted","Data":"b37b914999810be8e39bab3a4ea3ec65aa7a2949fd7ba086f9c7f0e207725f07"}
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.878290 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" event={"ID":"af75da0d-e4cb-4961-b57a-ea888c20af89","Type":"ContainerStarted","Data":"3cbb1a050b27e2f08dc3466dcf0820c46c6d62a7b4692a566341cefdbdcad0e7"}
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.903309 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8"]
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.925952 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm"]
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.935748 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:47 crc kubenswrapper[5016]: E1211 10:35:47.945768 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:48.445750998 +0000 UTC m=+65.264310577 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:47 crc kubenswrapper[5016]: I1211 10:35:47.995410 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" event={"ID":"01742c1a-692e-462f-a9b1-15dc72332645","Type":"ContainerStarted","Data":"463266c1e953ba9bbd0c80044843f25ddd1f5d4f0be9d37496e2f46330bd683c"}
Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.013388 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" event={"ID":"ab98cf68-77d0-4eb5-8ce0-8c3c52906582","Type":"ContainerStarted","Data":"fb8b5a48b7cbed4587622a03d3d5896165f5a678e3ad3aa4f3113d20136d749c"}
Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.061680 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:48 crc kubenswrapper[5016]: E1211 10:35:48.062081 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:48.562065953 +0000 UTC m=+65.380625532 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.100425 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" event={"ID":"5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b","Type":"ContainerStarted","Data":"c0dec9063c57a334a02a2bc04f421350f661ab17e5e7bd9ed08bb43068c72a94"}
Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.116598 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-tfj94"]
Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.120109 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zcxnk" podStartSLOduration=38.120089142 podStartE2EDuration="38.120089142s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:48.119110707 +0000 UTC m=+64.937670306" watchObservedRunningTime="2025-12-11 10:35:48.120089142 +0000 UTC m=+64.938648721"
Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.163589 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:48 crc kubenswrapper[5016]: E1211 10:35:48.163902 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:48.66389064 +0000 UTC m=+65.482450229 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.233803 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-jbqpn" podStartSLOduration=7.233779542 podStartE2EDuration="7.233779542s" podCreationTimestamp="2025-12-11 10:35:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:48.207033063 +0000 UTC m=+65.025592642" watchObservedRunningTime="2025-12-11 10:35:48.233779542 +0000 UTC m=+65.052339121" Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.234836 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-rtj4v"] Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.245589 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2"] Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.264500 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:48 crc kubenswrapper[5016]: E1211 10:35:48.266358 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:48.766336895 +0000 UTC m=+65.584896474 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.303960 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-m62h8" podStartSLOduration=38.30391764 podStartE2EDuration="38.30391764s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:48.277324805 +0000 UTC m=+65.095884404" watchObservedRunningTime="2025-12-11 10:35:48.30391764 +0000 UTC m=+65.122477229" Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.368198 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:48 crc kubenswrapper[5016]: E1211 10:35:48.368999 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:48.868978993 +0000 UTC m=+65.687538572 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.477645 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:48 crc kubenswrapper[5016]: E1211 10:35:48.478290 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:48.978268555 +0000 UTC m=+65.796828134 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.579648 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:48 crc kubenswrapper[5016]: E1211 10:35:48.579988 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.079977141 +0000 UTC m=+65.898536720 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.680412 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:48 crc kubenswrapper[5016]: E1211 10:35:48.680641 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.180606779 +0000 UTC m=+65.999166358 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.681009 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:48 crc kubenswrapper[5016]: E1211 10:35:48.681397 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.181386529 +0000 UTC m=+65.999946198 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.781967 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:48 crc kubenswrapper[5016]: E1211 10:35:48.782367 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.282352056 +0000 UTC m=+66.100911635 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.865305 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kp5bk"] Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.867292 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4k8l5"] Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.875961 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-7rm8d"] Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.877874 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4"] Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.884092 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:48 crc kubenswrapper[5016]: E1211 10:35:48.884388 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.384376779 +0000 UTC m=+66.202936358 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.886366 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xdpcj"] Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.888010 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-tn6f4"] Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.892188 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-88s4j"] Dec 11 10:35:48 crc kubenswrapper[5016]: I1211 10:35:48.984986 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:48 crc kubenswrapper[5016]: E1211 10:35:48.985327 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.485311776 +0000 UTC m=+66.303871355 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.024400 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-b485d"] Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.030513 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x"] Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.033436 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm"] Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.039762 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9"] Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.050519 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-p6ggc"] Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.055378 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g"] Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.066125 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-jpxgn"] Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.086792 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:49 crc kubenswrapper[5016]: E1211 10:35:49.087110 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.587095983 +0000 UTC m=+66.405655562 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.090599 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2" event={"ID":"1cc10407-5264-4bb5-8223-3ea9a4551c29","Type":"ContainerStarted","Data":"4910940119bec1089aecb8ce6a102c01f13aa62e4916031c7b46220131fa5224"} Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.091383 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" event={"ID":"fae98298-ad95-4355-9f4c-0f1c159cb0f9","Type":"ContainerStarted","Data":"3724c79ad3fa13acc1e7edb67c15d824a9584d5e4b98931f4b954930067a81dd"} Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.092371 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" event={"ID":"d6923b9a-6069-47eb-9513-ba0baa9d44a8","Type":"ContainerStarted","Data":"92200e683f2183fec93abb73a250bbf5d27d6d1448e771a877517f0f227fc60b"} Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.093090 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm" event={"ID":"1751c9db-e768-40b7-bc33-1b92ffa26c89","Type":"ContainerStarted","Data":"17ff1041653244e6984c3be8f12529390241fdd23a923a30abfc1b9d37e73b0a"} Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.093799 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" event={"ID":"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6","Type":"ContainerStarted","Data":"48425d1aee77ba886a3c4ed1459b994409ae3c943be1aa14d1cea540663804de"} Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.094458 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" event={"ID":"b3ab377b-889b-40df-984f-322d42490e57","Type":"ContainerStarted","Data":"4a2c8cbac62ad05b2af78808bf60b9869eada209c0f680842db97591dd561193"} Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.095109 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8" event={"ID":"7de297d6-6330-4fd7-b290-b564881e8139","Type":"ContainerStarted","Data":"e2b138612d3ada3fa9749a57f0506d82b537b4e1a891340d6fe0acbdf8ca5112"} Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.096315 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" event={"ID":"a91554fe-759f-4f9a-9d88-7b4d8650a08b","Type":"ContainerStarted","Data":"31e8b1100249974222875fee566bee010d4107f11e39abb910092b690e0af032"} Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.096424 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" podUID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" containerName="kube-multus-additional-cni-plugins" 
containerID="cri-o://42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" gracePeriod=30 Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.187760 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:49 crc kubenswrapper[5016]: E1211 10:35:49.187995 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.687963218 +0000 UTC m=+66.506522807 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.188323 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:49 crc kubenswrapper[5016]: E1211 10:35:49.188803 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.688779258 +0000 UTC m=+66.507338897 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:49 crc kubenswrapper[5016]: W1211 10:35:49.254891 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd20858ea_54b5_474f_bdd9_40eb83d42e57.slice/crio-ff6052185ea7a861aad8af68fbe7ea8dc4ed563e8ee8be49bd9c5e4245ff7e49 WatchSource:0}: Error finding container ff6052185ea7a861aad8af68fbe7ea8dc4ed563e8ee8be49bd9c5e4245ff7e49: Status 404 returned error can't find the container with id ff6052185ea7a861aad8af68fbe7ea8dc4ed563e8ee8be49bd9c5e4245ff7e49 Dec 11 10:35:49 crc kubenswrapper[5016]: W1211 10:35:49.255496 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8539d49_e453_4b15_a4d6_0e0583b93390.slice/crio-c14b78520c7ea8e84e262157bd24dd17e9a6458f6d2f06a0084cb7eef9157778 WatchSource:0}: Error finding container c14b78520c7ea8e84e262157bd24dd17e9a6458f6d2f06a0084cb7eef9157778: Status 404 returned error can't find the container with id c14b78520c7ea8e84e262157bd24dd17e9a6458f6d2f06a0084cb7eef9157778 Dec 11 10:35:49 crc kubenswrapper[5016]: W1211 10:35:49.257244 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded376fff_5d17_48b1_b48c_ec0c3548dde4.slice/crio-becb95c9c2c61ab9072d453fc30a07b17a05684fb44ae2f50cdde87efdfbbe9e WatchSource:0}: Error finding container becb95c9c2c61ab9072d453fc30a07b17a05684fb44ae2f50cdde87efdfbbe9e: Status 404 returned error can't find the container with id becb95c9c2c61ab9072d453fc30a07b17a05684fb44ae2f50cdde87efdfbbe9e Dec 11 10:35:49 crc kubenswrapper[5016]: W1211 10:35:49.258653 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb94a68f_794d_4e0f_9a65_aff1b885d021.slice/crio-e8e0a8d0377f443a913df2314dfe91de96427ff3bdf5650cf09445c202a2bc84 WatchSource:0}: Error finding container e8e0a8d0377f443a913df2314dfe91de96427ff3bdf5650cf09445c202a2bc84: Status 404 returned error can't find the container with id e8e0a8d0377f443a913df2314dfe91de96427ff3bdf5650cf09445c202a2bc84 Dec 11 10:35:49 crc kubenswrapper[5016]: W1211 10:35:49.266611 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc12d008c_11de_489c_9553_175a76cbfef8.slice/crio-e3c93b1b0f96228e97158cebbf1a08d831b27f6fd1dad63de8fb2dce4334143a WatchSource:0}: Error finding container e3c93b1b0f96228e97158cebbf1a08d831b27f6fd1dad63de8fb2dce4334143a: Status 404 returned error can't find the container with id e3c93b1b0f96228e97158cebbf1a08d831b27f6fd1dad63de8fb2dce4334143a Dec 11 10:35:49 crc kubenswrapper[5016]: W1211 10:35:49.268966 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9448161a_257a_46eb_b9f0_e3afac785b5d.slice/crio-23326e059a9b8c6d8a4bac4ec424d8802ebe6bef08c72b5d19ab76f4ce9e4f0a WatchSource:0}: Error finding container 
23326e059a9b8c6d8a4bac4ec424d8802ebe6bef08c72b5d19ab76f4ce9e4f0a: Status 404 returned error can't find the container with id 23326e059a9b8c6d8a4bac4ec424d8802ebe6bef08c72b5d19ab76f4ce9e4f0a Dec 11 10:35:49 crc kubenswrapper[5016]: W1211 10:35:49.277416 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5cf7d929_5a97_4045_a7bd_3e92c172c7e3.slice/crio-a0ccc0b4e807636548ec0eae342803b3f76804dc7cabf945e990627d908be63c WatchSource:0}: Error finding container a0ccc0b4e807636548ec0eae342803b3f76804dc7cabf945e990627d908be63c: Status 404 returned error can't find the container with id a0ccc0b4e807636548ec0eae342803b3f76804dc7cabf945e990627d908be63c Dec 11 10:35:49 crc kubenswrapper[5016]: W1211 10:35:49.278711 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod771549fe_a108_4fe9_a461_043432468961.slice/crio-5934380da0368a7bba715eb26ded938297c71eb57ae67081d59cfa8e8ab6b382 WatchSource:0}: Error finding container 5934380da0368a7bba715eb26ded938297c71eb57ae67081d59cfa8e8ab6b382: Status 404 returned error can't find the container with id 5934380da0368a7bba715eb26ded938297c71eb57ae67081d59cfa8e8ab6b382 Dec 11 10:35:49 crc kubenswrapper[5016]: W1211 10:35:49.283374 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfa3166f9_577e_4994_9290_7ced66d69dcc.slice/crio-9f6adef18ccb4454b2866fdb14291125e8fff76c0bd7e44ba9aeeb8058d198e0 WatchSource:0}: Error finding container 9f6adef18ccb4454b2866fdb14291125e8fff76c0bd7e44ba9aeeb8058d198e0: Status 404 returned error can't find the container with id 9f6adef18ccb4454b2866fdb14291125e8fff76c0bd7e44ba9aeeb8058d198e0 Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.289474 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:49 crc kubenswrapper[5016]: E1211 10:35:49.289698 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.789619192 +0000 UTC m=+66.608178771 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.290495 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:49 crc kubenswrapper[5016]: E1211 10:35:49.294726 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.793696662 +0000 UTC m=+66.612256251 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.391526 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:49 crc kubenswrapper[5016]: E1211 10:35:49.391883 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.891867901 +0000 UTC m=+66.710427480 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.492861 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:49 crc kubenswrapper[5016]: E1211 10:35:49.495508 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:49.993387042 +0000 UTC m=+66.811946621 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.596594 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:49 crc kubenswrapper[5016]: E1211 10:35:49.597069 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.097053066 +0000 UTC m=+66.915612645 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.698703 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:49 crc kubenswrapper[5016]: E1211 10:35:49.699095 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.199084859 +0000 UTC m=+67.017644438 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.799729 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:49 crc kubenswrapper[5016]: E1211 10:35:49.799911 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.299885722 +0000 UTC m=+67.118445301 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.800041 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:49 crc kubenswrapper[5016]: E1211 10:35:49.800376 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.300365264 +0000 UTC m=+67.118924843 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:49 crc kubenswrapper[5016]: I1211 10:35:49.901317 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:49 crc kubenswrapper[5016]: E1211 10:35:49.901730 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.401713391 +0000 UTC m=+67.220272960 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.002657 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.003102 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.503080838 +0000 UTC m=+67.321640417 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.101383 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" event={"ID":"c12d008c-11de-489c-9553-175a76cbfef8","Type":"ContainerStarted","Data":"e3c93b1b0f96228e97158cebbf1a08d831b27f6fd1dad63de8fb2dce4334143a"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.102598 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-7rm8d" event={"ID":"d20858ea-54b5-474f-bdd9-40eb83d42e57","Type":"ContainerStarted","Data":"ff6052185ea7a861aad8af68fbe7ea8dc4ed563e8ee8be49bd9c5e4245ff7e49"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.103224 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.103356 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.603331128 +0000 UTC m=+67.421890707 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.103428 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" event={"ID":"cb94a68f-794d-4e0f-9a65-aff1b885d021","Type":"ContainerStarted","Data":"e8e0a8d0377f443a913df2314dfe91de96427ff3bdf5650cf09445c202a2bc84"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.103479 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.103791 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.603783609 +0000 UTC m=+67.422343178 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.104502 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x" event={"ID":"9448161a-257a-46eb-b9f0-e3afac785b5d","Type":"ContainerStarted","Data":"23326e059a9b8c6d8a4bac4ec424d8802ebe6bef08c72b5d19ab76f4ce9e4f0a"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.105794 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" event={"ID":"67dd196e-3271-4222-aa21-dfaf3278eee0","Type":"ContainerStarted","Data":"a935f10f6a83a521998e85aff4db8e97b019c18856bc6745278ee7decbad7bcf"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.107503 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" event={"ID":"d0bd8e76-c7d5-486a-99a3-54e04ac3cb7e","Type":"ContainerStarted","Data":"656f8bd54b3079a95b38559d5a9bd9a5086992dfb514a7e591f7ad567c9880c2"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.108599 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" event={"ID":"98219d38-61a5-425b-8281-3d0b72e10c77","Type":"ContainerStarted","Data":"43a6aa1390440ffa17df6153c527e237ee7f66ada039c290f5634a6e56e0ec2f"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.110046 5016 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc" event={"ID":"5cf7d929-5a97-4045-a7bd-3e92c172c7e3","Type":"ContainerStarted","Data":"a0ccc0b4e807636548ec0eae342803b3f76804dc7cabf945e990627d908be63c"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.111037 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jpxgn" event={"ID":"fa3166f9-577e-4994-9290-7ced66d69dcc","Type":"ContainerStarted","Data":"9f6adef18ccb4454b2866fdb14291125e8fff76c0bd7e44ba9aeeb8058d198e0"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.111829 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" event={"ID":"b1573c39-dbf1-475d-90d8-2bc8d89f18c6","Type":"ContainerStarted","Data":"9fcd8b74d91cdc252e1349ac6381ae4e544549c7eb1e89e495ad5038b021299d"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.112471 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" event={"ID":"d8539d49-e453-4b15-a4d6-0e0583b93390","Type":"ContainerStarted","Data":"c14b78520c7ea8e84e262157bd24dd17e9a6458f6d2f06a0084cb7eef9157778"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.113261 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-88s4j" event={"ID":"ed376fff-5d17-48b1-b48c-ec0c3548dde4","Type":"ContainerStarted","Data":"becb95c9c2c61ab9072d453fc30a07b17a05684fb44ae2f50cdde87efdfbbe9e"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.113929 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" event={"ID":"771549fe-a108-4fe9-a461-043432468961","Type":"ContainerStarted","Data":"5934380da0368a7bba715eb26ded938297c71eb57ae67081d59cfa8e8ab6b382"} Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.204919 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.205246 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.705211887 +0000 UTC m=+67.523771466 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.205501 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.205818 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.705801592 +0000 UTC m=+67.524361171 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.306207 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.306411 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.806371199 +0000 UTC m=+67.624930778 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.306495 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.306860 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.806846311 +0000 UTC m=+67.625405890 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.408542 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.408778 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.908740681 +0000 UTC m=+67.727300280 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.409329 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.409628 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:50.909615642 +0000 UTC m=+67.728175221 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.510891 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.511084 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.011054522 +0000 UTC m=+67.829614101 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.511232 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.511664 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.011649806 +0000 UTC m=+67.830209385 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.612983 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.613161 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.113127316 +0000 UTC m=+67.931686905 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.613589 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.614002 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.113985807 +0000 UTC m=+67.932545386 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.714367 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.714524 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.214501433 +0000 UTC m=+68.033061002 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.714732 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.715120 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.215111648 +0000 UTC m=+68.033671227 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.816196 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.816354 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.316329832 +0000 UTC m=+68.134889411 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.816476 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.816811 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.316801373 +0000 UTC m=+68.135360952 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.917552 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.917710 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.417689238 +0000 UTC m=+68.236248817 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:50 crc kubenswrapper[5016]: I1211 10:35:50.917995 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:50 crc kubenswrapper[5016]: E1211 10:35:50.918304 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.418296024 +0000 UTC m=+68.236855593 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.018789 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
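The cycle above repeats because neither the new image-registry pod's PVC mount nor the old pod's unmount can proceed until the hostpath CSI driver registers with the kubelet; each failure arms a 500ms backoff, so most of the reconciler's ~100ms passes only log "No retries permitted until ...". A minimal Go sketch of that gating pattern (illustrative only; opState and run are hypothetical names, not kubelet's nestedpendingoperations code):

// Sketch of backoff gating: a failed operation is not retried until its
// window elapses, which is why every failure line above stamps a deadline
// exactly durationBeforeRetry (500ms) in the future.
package main

import (
	"errors"
	"fmt"
	"time"
)

var errDriverNotRegistered = errors.New(
	"driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers")

// opState tracks the earliest time one volume operation may run again.
type opState struct{ retryAfter time.Time }

// run executes op unless the operation is still inside its backoff window.
func (s *opState) run(op func() error) error {
	if now := time.Now(); now.Before(s.retryAfter) {
		return fmt.Errorf("no retries permitted until %s", s.retryAfter.Format(time.RFC3339Nano))
	}
	if err := op(); err != nil {
		s.retryAfter = time.Now().Add(500 * time.Millisecond) // durationBeforeRetry
		return err
	}
	return nil
}

func main() {
	var st opState
	// The reconciler wakes roughly every 100ms; only about every fifth
	// attempt actually re-runs the CSI call, matching the log cadence.
	for i := 0; i < 10; i++ {
		if err := st.run(func() error { return errDriverNotRegistered }); err != nil {
			fmt.Println(err)
		}
		time.Sleep(100 * time.Millisecond)
	}
}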
Dec 11 10:35:51 crc kubenswrapper[5016]: E1211 10:35:51.019594 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.519572688 +0000 UTC m=+68.338132267 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.068302 5016 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.122038 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:51 crc kubenswrapper[5016]: E1211 10:35:51.122355 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.62234339 +0000 UTC m=+68.440902969 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.132827 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" event={"ID":"d8539d49-e453-4b15-a4d6-0e0583b93390","Type":"ContainerStarted","Data":"c906b2e6c1fdf3d3c54425c0ce75c9e2602c7ad43116b532768fcb1075c0b67d"}
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.134123 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" event={"ID":"86f2da10-45a8-4cc4-9100-3e909d78274f","Type":"ContainerStarted","Data":"7ef0591e37bfe318ca1d1809248a9634cf4c77862ca21d558e0eb5070473cf7d"}
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.136807 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" event={"ID":"b1573c39-dbf1-475d-90d8-2bc8d89f18c6","Type":"ContainerStarted","Data":"e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b"}
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.139113 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" event={"ID":"01742c1a-692e-462f-a9b1-15dc72332645","Type":"ContainerStarted","Data":"4a300216299b98972ced2c0f088fb350be3f14ebd57aca586f1890ddab035f35"}
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.144562 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm"
event={"ID":"c12d008c-11de-489c-9553-175a76cbfef8","Type":"ContainerStarted","Data":"5b784c62689df2be45a39470c976f24870990ec2dcfc9b75a4f5a9189834291e"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.148051 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc" event={"ID":"5cf7d929-5a97-4045-a7bd-3e92c172c7e3","Type":"ContainerStarted","Data":"4dcdb6253b9f48834998e92b6dcf0b15b00096145c45756baf96e186dc689cfe"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.153091 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-zp2gq" event={"ID":"e24b6dae-ea03-4141-a0ab-baf91f6b9ab8","Type":"ContainerStarted","Data":"394c9cd07ebe2612bb56413b79d7801063795bec80283ffc86d150f246c49876"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.165555 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x" event={"ID":"9448161a-257a-46eb-b9f0-e3afac785b5d","Type":"ContainerStarted","Data":"cd1ca385451df411e01368c74058d6ff814f4fb6ccf2765407461aeee8f397cc"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.168432 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" event={"ID":"8f7503d5-38fd-43ab-8a20-ea1f0a19fa7d","Type":"ContainerStarted","Data":"ee3bd42e9419f8430d7d20e8612fd5f7a9a4ffd6b1a54106dfa5eb41837b5b33"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.170388 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" event={"ID":"af75da0d-e4cb-4961-b57a-ea888c20af89","Type":"ContainerStarted","Data":"9ce4148ece840ebe41ad58c912bfffe8834d0fbc87aad978fef1853fa1e8b6a2"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.172721 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" event={"ID":"d6923b9a-6069-47eb-9513-ba0baa9d44a8","Type":"ContainerStarted","Data":"95642247fc959ab4ca55da98bf1c193ba08f909bfa3c9ce8c1012992f9663609"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.183322 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" event={"ID":"73e450c1-7bc9-4502-b3c5-e7845ba29342","Type":"ContainerStarted","Data":"023aa09b3d18a096e3f47553f4be8c6399a97ea897ba5564fab89821c9eff5a7"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.205957 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8" event={"ID":"7de297d6-6330-4fd7-b290-b564881e8139","Type":"ContainerStarted","Data":"bb9120eb3147840d2e9dbbd66ace630c12c7f141a227e0749c5f54b2bcb32df3"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.223599 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jpxgn" event={"ID":"fa3166f9-577e-4994-9290-7ced66d69dcc","Type":"ContainerStarted","Data":"c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.230571 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 11 10:35:51 crc kubenswrapper[5016]: E1211 10:35:51.230809 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.730770431 +0000 UTC m=+68.549330010 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.231092 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:51 crc kubenswrapper[5016]: E1211 10:35:51.231552 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-11 10:35:51.73153449 +0000 UTC m=+68.550094069 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4cp4w" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.278668 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" event={"ID":"9176a2f3-177d-4e8e-80d6-688c3e76ed46","Type":"ContainerStarted","Data":"4b6b8c5f3799b98387707a6c2c157f1c545485b93c39dc7d65fe51e2bf81aee2"}
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.279818 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs"
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.294583 5016 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zfqzs container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" start-of-body=
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.294637 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" podUID="9176a2f3-177d-4e8e-80d6-688c3e76ed46" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused"
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.298796 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" event={"ID":"5e0a05be-0c8d-45fe-9f42-dfeae7f6f72b","Type":"ContainerStarted","Data":"0612a95d9734c25f23f13cee1b27b59c447e273ea046086707267a087f09e204"}
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.301562 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rht9z" podStartSLOduration=41.301461152 podStartE2EDuration="41.301461152s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:51.225423979 +0000 UTC m=+68.043983588" watchObservedRunningTime="2025-12-11 10:35:51.301461152 +0000 UTC m=+68.120020761"
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.305303 5016 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-12-11T10:35:51.068550785Z","Handler":null,"Name":""}
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.325519 5016 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.325560 5016 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
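Registration resolves the race seen above: the plugin watcher picks up the driver's registration socket, the kubelet validates the plugin, and the driver name is added to the set of registered CSI drivers that mount and unmount operations look up. A minimal sketch of that lookup (the driverRegistry type is hypothetical, not the kubelet's implementation):

// Sketch of the registration race: mounts fail with "not found in the
// list of registered CSI drivers" until the driver name appears in a
// shared, name-keyed registry.
package main

import (
	"fmt"
	"sync"
)

// driverRegistry stands in for kubelet's registered-CSI-drivers map.
type driverRegistry struct {
	mu      sync.RWMutex
	drivers map[string]string // driver name -> endpoint
}

func newDriverRegistry() *driverRegistry {
	return &driverRegistry{drivers: make(map[string]string)}
}

func (r *driverRegistry) register(name, endpoint string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.drivers[name] = endpoint
}

func (r *driverRegistry) mountDevice(name string) error {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if _, ok := r.drivers[name]; !ok {
		return fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return nil
}

func main() {
	reg := newDriverRegistry()
	fmt.Println(reg.mountDevice("kubevirt.io.hostpath-provisioner")) // error: not yet registered
	reg.register("kubevirt.io.hostpath-provisioner", "/var/lib/kubelet/plugins/csi-hostpath/csi.sock")
	fmt.Println(reg.mountDevice("kubevirt.io.hostpath-provisioner")) // <nil>: mount can proceed
}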
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.332570 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.340684 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" podStartSLOduration=41.340666728 podStartE2EDuration="41.340666728s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:51.34033367 +0000 UTC m=+68.158893259" watchObservedRunningTime="2025-12-11 10:35:51.340666728 +0000 UTC m=+68.159226307"
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.341026 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-jpxgn" podStartSLOduration=41.341017857 podStartE2EDuration="41.341017857s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:51.305617935 +0000 UTC m=+68.124177534" watchObservedRunningTime="2025-12-11 10:35:51.341017857 +0000 UTC m=+68.159577456"
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.382122 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" event={"ID":"70a267ab-51a0-4f69-a9b9-8b738ab364a9","Type":"ContainerStarted","Data":"63339743ae28de2638b2c557caafdfae63d9f3e87c94a3b8d0c29a6aea805fc1"}
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.382691 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt"
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.385948 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" event={"ID":"b3ab377b-889b-40df-984f-322d42490e57","Type":"ContainerStarted","Data":"e66a99ac2b49389f24ecee8e495769c012507a9a389fd94598e167bef265e32d"}
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.390368 5016 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-88cmt container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body=
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.390438 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" podUID="70a267ab-51a0-4f69-a9b9-8b738ab364a9" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused"
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.393702 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" event={"ID":"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31","Type":"ContainerStarted","Data":"d87a2c15f9d07b0db095d64e8eea943b0672472f6950d7130cb310d7cc09acf3"}
Dec 11 10:35:51 crc
kubenswrapper[5016]: I1211 10:35:51.395912 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm" event={"ID":"1751c9db-e768-40b7-bc33-1b92ffa26c89","Type":"ContainerStarted","Data":"7d0569adf594016dac8af4803bf451016443c4e521474d3a2c6774f5241aa301"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.416022 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gljg8" podStartSLOduration=41.415998694 podStartE2EDuration="41.415998694s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:51.404100341 +0000 UTC m=+68.222659930" watchObservedRunningTime="2025-12-11 10:35:51.415998694 +0000 UTC m=+68.234558283" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.420746 5016 generic.go:334] "Generic (PLEG): container finished" podID="c22ea3a4-ad05-4dde-9cc8-0a0365d225a6" containerID="646af7a6c611eaa39d4f129d4d29872252039c895bda719e84e13606c0f9127d" exitCode=0 Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.420850 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" event={"ID":"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6","Type":"ContainerDied","Data":"646af7a6c611eaa39d4f129d4d29872252039c895bda719e84e13606c0f9127d"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.434920 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.437867 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" event={"ID":"fc71fe56-8968-4acd-8ae4-50031e11e8db","Type":"ContainerStarted","Data":"dbadf638d3ed7fac1c06b282c86c64a6afd0d4cf44fb95b907e9b86d411b400b"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.468467 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.496148 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" podStartSLOduration=41.496119557 podStartE2EDuration="41.496119557s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:51.486094581 +0000 UTC m=+68.304654170" watchObservedRunningTime="2025-12-11 10:35:51.496119557 +0000 UTC m=+68.314679136" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.502974 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.503576 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-8f46b" event={"ID":"4a914a7e-cc73-4d59-a122-e58d5f2da33b","Type":"ContainerStarted","Data":"9e9812d597cf85fc96473c61410b2ec7fe8781768a44094eb343ba2b670e8555"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.503615 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vb5zj" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.516827 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" event={"ID":"ab98cf68-77d0-4eb5-8ce0-8c3c52906582","Type":"ContainerStarted","Data":"5b980bc20e3c9e268b07f87917660fee61ef5647a92820f7e8d21d522111ab94"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.530635 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" podStartSLOduration=41.530607207 podStartE2EDuration="41.530607207s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:51.522494977 +0000 UTC m=+68.341054566" watchObservedRunningTime="2025-12-11 10:35:51.530607207 +0000 UTC m=+68.349166786" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.544747 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.564814 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8rnb2" podStartSLOduration=41.564795739 podStartE2EDuration="41.564795739s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:51.563259371 +0000 UTC m=+68.381818960" watchObservedRunningTime="2025-12-11 10:35:51.564795739 +0000 UTC m=+68.383355318" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.570291 5016 csi_attacher.go:380] 
kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.570332 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.628163 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" event={"ID":"98219d38-61a5-425b-8281-3d0b72e10c77","Type":"ContainerStarted","Data":"7d593c1e3f0a5e603256764fd5bb8645ce34d54c7705119bee13eb08ecbf93e1"}
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.650073 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs\") pod \"network-metrics-daemon-v2qvr\" (UID: \"d29d8609-2309-45b2-abc7-b4e10ae27eeb\") " pod="openshift-multus/network-metrics-daemon-v2qvr"
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.664819 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" event={"ID":"fae98298-ad95-4355-9f4c-0f1c159cb0f9","Type":"ContainerStarted","Data":"191fb6f437c3146eab0c76c25b186c494ca3a585c58b339bc110fd406bfcf108"}
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.697792 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d29d8609-2309-45b2-abc7-b4e10ae27eeb-metrics-certs\") pod \"network-metrics-daemon-v2qvr\" (UID: \"d29d8609-2309-45b2-abc7-b4e10ae27eeb\") " pod="openshift-multus/network-metrics-daemon-v2qvr"
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.704693 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-rtj4v" podStartSLOduration=41.704674525 podStartE2EDuration="41.704674525s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:51.625599347 +0000 UTC m=+68.444158936" watchObservedRunningTime="2025-12-11 10:35:51.704674525 +0000 UTC m=+68.523234104"
Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.705808 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" event={"ID":"cb94a68f-794d-4e0f-9a65-aff1b885d021","Type":"ContainerStarted","Data":"482c5e4a166fabf8f98cdb6fbafc3b628326266a1be2d3cb666860cdd6c46f66"}
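With the driver registered, MountDevice becomes a no-op here: the hostpath plugin does not advertise the CSI STAGE_UNSTAGE_VOLUME capability, so the attacher skips the staging step and SetUp later bind-mounts the volume directly. A sketch of that capability gate (assumed shape, not the real csi_attacher code; all names below are hypothetical):

// Sketch: staging to a global mount path only happens when the node
// plugin advertises STAGE_UNSTAGE_VOLUME via NodeGetCapabilities.
package main

import "fmt"

// nodeCapabilities is a stand-in for the capability list a CSI node
// plugin returns from NodeGetCapabilities.
type nodeCapabilities struct {
	stageUnstage bool
}

func mountDevice(caps nodeCapabilities, globalMountPath string) error {
	if !caps.stageUnstage {
		fmt.Println("STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")
		return nil // nothing to stage; SetUp will mount the volume directly
	}
	fmt.Printf("staging volume at %s\n", globalMountPath)
	return nil // a real attacher would call NodeStageVolume here
}

func main() {
	// The hostpath provisioner in this log does not stage, so the device
	// mount step succeeds as a no-op and SetUp follows shortly after.
	_ = mountDevice(nodeCapabilities{stageUnstage: false},
		"/var/lib/kubelet/plugins/kubernetes.io/csi/example/globalmount") // hypothetical path
}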
observedRunningTime="2025-12-11 10:35:51.704141722 +0000 UTC m=+68.522701291" watchObservedRunningTime="2025-12-11 10:35:51.711953554 +0000 UTC m=+68.530513153" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.759888 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-8f46b" podStartSLOduration=41.759871075 podStartE2EDuration="41.759871075s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:51.758566133 +0000 UTC m=+68.577125712" watchObservedRunningTime="2025-12-11 10:35:51.759871075 +0000 UTC m=+68.578430654" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.790466 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2" event={"ID":"1cc10407-5264-4bb5-8223-3ea9a4551c29","Type":"ContainerStarted","Data":"22967108b1bd701701bf82233a97db973546fbce1d684d889d2916ddab7bfa28"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.798695 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skczt" podStartSLOduration=41.79867718 podStartE2EDuration="41.79867718s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:51.790068668 +0000 UTC m=+68.608628247" watchObservedRunningTime="2025-12-11 10:35:51.79867718 +0000 UTC m=+68.617236759" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.803353 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-v2qvr" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.897212 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" event={"ID":"771549fe-a108-4fe9-a461-043432468961","Type":"ContainerStarted","Data":"59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.898172 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.900758 5016 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-w2qk9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.900793 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" podUID="771549fe-a108-4fe9-a461-043432468961" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.901691 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" event={"ID":"a91554fe-759f-4f9a-9d88-7b4d8650a08b","Type":"ContainerStarted","Data":"af4cdd2cafc9fea690f4c35a1e0f4de9d56d8a8d9d4a0c7b8889f7d8c20723fc"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.902574 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.912981 5016 generic.go:334] "Generic (PLEG): container finished" podID="974b6a63-5953-4683-8909-20b4a93856b1" containerID="a04c7f6dc4d3e1bd2c23aeeb6b4c7912ccef90bcf187b68cd4be4131a4f80db5" exitCode=0 Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.913075 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" event={"ID":"974b6a63-5953-4683-8909-20b4a93856b1","Type":"ContainerDied","Data":"a04c7f6dc4d3e1bd2c23aeeb6b4c7912ccef90bcf187b68cd4be4131a4f80db5"} Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.914692 5016 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-4k8l5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.24:6443/healthz\": dial tcp 10.217.0.24:6443: connect: connection refused" start-of-body= Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.914719 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.24:6443/healthz\": dial tcp 10.217.0.24:6443: connect: connection refused" Dec 11 10:35:51 crc kubenswrapper[5016]: I1211 10:35:51.979435 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4cp4w\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.007542 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-88s4j" event={"ID":"ed376fff-5d17-48b1-b48c-ec0c3548dde4","Type":"ContainerStarted","Data":"d3786864fbe82871e1e8f0659e7fc3d63a316fdd9c493634f348a4ba731619f8"} Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.008609 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-88s4j" Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.043806 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.043884 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.053861 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-7rm8d" event={"ID":"d20858ea-54b5-474f-bdd9-40eb83d42e57","Type":"ContainerStarted","Data":"cfd8db0af1937cbd2b804bc95eecd0f124fd8bb7c2be4b7244782d7254ee152e"} Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.054540 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.054860 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-b485d" podStartSLOduration=43.054846901 podStartE2EDuration="43.054846901s" podCreationTimestamp="2025-12-11 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:52.053367034 +0000 UTC m=+68.871926653" watchObservedRunningTime="2025-12-11 10:35:52.054846901 +0000 UTC m=+68.873406480" Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.061112 5016 patch_prober.go:28] interesting pod/console-operator-58897d9998-7rm8d container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.061174 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-7rm8d" podUID="d20858ea-54b5-474f-bdd9-40eb83d42e57" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.128355 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-88s4j" 
podStartSLOduration=42.128333122 podStartE2EDuration="42.128333122s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:52.126154487 +0000 UTC m=+68.944714086" watchObservedRunningTime="2025-12-11 10:35:52.128333122 +0000 UTC m=+68.946892711"
Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.280188 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.280661 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.303625 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-8f46b"
Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.319601 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:35:52 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:35:52 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:35:52 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.319723 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.325660 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" podStartSLOduration=43.325640552 podStartE2EDuration="43.325640552s" podCreationTimestamp="2025-12-11 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:52.324833552 +0000 UTC m=+69.143393151" watchObservedRunningTime="2025-12-11 10:35:52.325640552 +0000 UTC m=+69.144200131"
Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.394966 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-7rm8d" podStartSLOduration=42.394931519 podStartE2EDuration="42.394931519s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:52.39459354 +0000 UTC m=+69.213153129" watchObservedRunningTime="2025-12-11 10:35:52.394931519 +0000 UTC m=+69.213491108"
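The podStartSLOduration fields above are internally consistent: with firstStartedPulling at the zero time (no image pull was observed), the reported SLO duration equals watchObservedRunningTime minus podCreationTimestamp. A quick check in Go, using the values from the downloads pod entry above:

// Sketch verifying the startup-latency arithmetic in these entries.
package main

import (
	"fmt"
	"time"
)

func main() {
	created, _ := time.Parse(time.RFC3339, "2025-12-11T10:35:10Z")
	watchObservedRunning, _ := time.Parse(time.RFC3339Nano, "2025-12-11T10:35:52.128333122Z")

	slo := watchObservedRunning.Sub(created)
	fmt.Println(slo.Seconds()) // 42.128333122, matching podStartSLOduration
}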
Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.500878 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" podStartSLOduration=42.500856368 podStartE2EDuration="42.500856368s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:52.464196165 +0000 UTC m=+69.282755754" watchObservedRunningTime="2025-12-11 10:35:52.500856368 +0000 UTC m=+69.319415957"
Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.503328 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.543725 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" podStartSLOduration=43.543697764 podStartE2EDuration="43.543697764s" podCreationTimestamp="2025-12-11 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:52.528852558 +0000 UTC m=+69.347412147" watchObservedRunningTime="2025-12-11 10:35:52.543697764 +0000 UTC m=+69.362257353"
Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.656590 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gkzj6" podStartSLOduration=42.656575074 podStartE2EDuration="42.656575074s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:52.617987624 +0000 UTC m=+69.436547223" watchObservedRunningTime="2025-12-11 10:35:52.656575074 +0000 UTC m=+69.475134653"
Dec 11 10:35:52 crc kubenswrapper[5016]: I1211 10:35:52.658498 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=0.658488442 podStartE2EDuration="658.488442ms" podCreationTimestamp="2025-12-11 10:35:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:52.655904488 +0000 UTC m=+69.474464067" watchObservedRunningTime="2025-12-11 10:35:52.658488442 +0000 UTC m=+69.477048021"
Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.009180 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-4cp4w"]
Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.060173 5016 generic.go:334] "Generic (PLEG): container finished" podID="fae98298-ad95-4355-9f4c-0f1c159cb0f9" containerID="191fb6f437c3146eab0c76c25b186c494ca3a585c58b339bc110fd406bfcf108" exitCode=0
Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.060239 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" event={"ID":"fae98298-ad95-4355-9f4c-0f1c159cb0f9","Type":"ContainerDied","Data":"191fb6f437c3146eab0c76c25b186c494ca3a585c58b339bc110fd406bfcf108"}
Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.061088 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" event={"ID":"84d62237-3910-4eeb-845d-2d9c3c5a8d97","Type":"ContainerStarted","Data":"ecd64465dc43b912604fe9897f9ea3b0a36cec2ccaa959ec1801c434e3e60b9e"}
Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.067650 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" event={"ID":"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31","Type":"ContainerStarted","Data":"984d81fc085350f6925d335005aecdee7111fee4f424f17b77dd161bc12c6ad1"}
Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.071261 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api"
pods=["openshift-multus/network-metrics-daemon-v2qvr"] Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.073511 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" event={"ID":"86f2da10-45a8-4cc4-9100-3e909d78274f","Type":"ContainerStarted","Data":"636d6c1222091ee7ed9d231fdd6fef1d07c834a5d0e140aa1ca59dca3b93085e"} Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.074175 5016 patch_prober.go:28] interesting pod/console-operator-58897d9998-7rm8d container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.074213 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-7rm8d" podUID="d20858ea-54b5-474f-bdd9-40eb83d42e57" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.075565 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.075592 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.081083 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.084130 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-88cmt" Dec 11 10:35:53 crc kubenswrapper[5016]: W1211 10:35:53.088807 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd29d8609_2309_45b2_abc7_b4e10ae27eeb.slice/crio-de1ad5aee961b6bb5630fb480b54a2dd25813c9b66370b469bab5888686c0ea0 WatchSource:0}: Error finding container de1ad5aee961b6bb5630fb480b54a2dd25813c9b66370b469bab5888686c0ea0: Status 404 returned error can't find the container with id de1ad5aee961b6bb5630fb480b54a2dd25813c9b66370b469bab5888686c0ea0 Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.090731 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.104868 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdl6x" podStartSLOduration=43.104848167 podStartE2EDuration="43.104848167s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:53.10173922 +0000 UTC 
m=+69.920298819" watchObservedRunningTime="2025-12-11 10:35:53.104848167 +0000 UTC m=+69.923407756" Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.218428 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7xxn4" podStartSLOduration=43.218406904 podStartE2EDuration="43.218406904s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:53.217491322 +0000 UTC m=+70.036050921" watchObservedRunningTime="2025-12-11 10:35:53.218406904 +0000 UTC m=+70.036966493" Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.218922 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podStartSLOduration=43.218915177 podStartE2EDuration="43.218915177s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:53.155136036 +0000 UTC m=+69.973695625" watchObservedRunningTime="2025-12-11 10:35:53.218915177 +0000 UTC m=+70.037474766" Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.305630 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:35:53 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:35:53 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:35:53 crc kubenswrapper[5016]: healthz check failed Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.305700 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.320545 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" podStartSLOduration=43.32051985 podStartE2EDuration="43.32051985s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:53.297510663 +0000 UTC m=+70.116070252" watchObservedRunningTime="2025-12-11 10:35:53.32051985 +0000 UTC m=+70.139079429" Dec 11 10:35:53 crc kubenswrapper[5016]: I1211 10:35:53.971829 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tb4rm" podStartSLOduration=44.971801754 podStartE2EDuration="44.971801754s" podCreationTimestamp="2025-12-11 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:53.969901107 +0000 UTC m=+70.788460686" watchObservedRunningTime="2025-12-11 10:35:53.971801754 +0000 UTC m=+70.790361333" Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.052179 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-p6ggc" 
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.087053 5016 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zfqzs container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.087103 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" podUID="9176a2f3-177d-4e8e-80d6-688c3e76ed46" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.094791 5016 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-4k8l5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.24:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.094808 5016 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-w2qk9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.094848 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" podUID="771549fe-a108-4fe9-a461-043432468961" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.094847 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.24:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.099050 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-v2qvr" event={"ID":"d29d8609-2309-45b2-abc7-b4e10ae27eeb","Type":"ContainerStarted","Data":"de1ad5aee961b6bb5630fb480b54a2dd25813c9b66370b469bab5888686c0ea0"}
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.100599 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.100889 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.100918 5016 patch_prober.go:28] interesting pod/console-operator-58897d9998-7rm8d container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body=
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.100960 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-7rm8d" podUID="d20858ea-54b5-474f-bdd9-40eb83d42e57" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused"
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.301920 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:35:54 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:35:54 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:35:54 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.302055 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:35:54 crc kubenswrapper[5016]: E1211 10:35:54.487224 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:35:54 crc kubenswrapper[5016]: E1211 10:35:54.539813 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.556168 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9"
Dec 11 10:35:54 crc kubenswrapper[5016]: E1211 10:35:54.600392 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:35:54 crc kubenswrapper[5016]: E1211 10:35:54.600790 5016 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" podUID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" containerName="kube-multus-additional-cni-plugins"
pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" podUID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" containerName="kube-multus-additional-cni-plugins" Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.666304 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:35:54 crc kubenswrapper[5016]: I1211 10:35:54.670747 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zfqzs" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.112261 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" event={"ID":"84d62237-3910-4eeb-845d-2d9c3c5a8d97","Type":"ContainerStarted","Data":"be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.119880 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-zp2gq" event={"ID":"e24b6dae-ea03-4141-a0ab-baf91f6b9ab8","Type":"ContainerStarted","Data":"36b41854a4990602baeb003c071c658ef752f8d2ec56c8682c0d7bc10d8df714"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.126470 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" event={"ID":"01742c1a-692e-462f-a9b1-15dc72332645","Type":"ContainerStarted","Data":"55c7d5182634a22083b9bf28fb3d4d69115ecc33e9a84f7b0443e02d2c444b4d"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.132136 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" event={"ID":"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6","Type":"ContainerStarted","Data":"57a3e51f72fa57962ea9e14b4fe13e5e2db388ad710e75c58e00d3ee1bb953a4"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.136370 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" event={"ID":"fae98298-ad95-4355-9f4c-0f1c159cb0f9","Type":"ContainerStarted","Data":"b12932adaa794565d3412568ca30d95fe4f20bb657aa529cdba0a0c47cd30510"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.136544 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.143892 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" event={"ID":"cb94a68f-794d-4e0f-9a65-aff1b885d021","Type":"ContainerStarted","Data":"a2f8294583d4cbccab9193ae6371434975419ac719e4489c8e4d10161f5534e6"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.149201 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-v2qvr" event={"ID":"d29d8609-2309-45b2-abc7-b4e10ae27eeb","Type":"ContainerStarted","Data":"0fb4c0da9b86a9e3173e1fd9dabaf172919c3d482cdfab0464c85a18769ac61b"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.151996 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" event={"ID":"eceb77a8-82c2-4c74-a0f3-1f6e19a5fa31","Type":"ContainerStarted","Data":"d0a11dfad402feda40723e0892cb54912c17fe947b1e342db69970d211786fdf"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.155394 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" event={"ID":"974b6a63-5953-4683-8909-20b4a93856b1","Type":"ContainerStarted","Data":"0012572f626138b5dafce2fd6b8d11347436204f7fca95f772ad3664943bd83c"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.160130 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" event={"ID":"73e450c1-7bc9-4502-b3c5-e7845ba29342","Type":"ContainerStarted","Data":"8ac3dedf3ebc4d1f0f5388fd29026351106da3ba4c411bad1b3b88a9697a2c07"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.162440 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm" event={"ID":"1751c9db-e768-40b7-bc33-1b92ffa26c89","Type":"ContainerStarted","Data":"66f3b7871552bd38224ca2f1ea28a99f7e58996e21cb8e4b8335659be27264f8"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.167500 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-zp2gq" podStartSLOduration=45.167487072 podStartE2EDuration="45.167487072s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:55.16419627 +0000 UTC m=+71.982755849" watchObservedRunningTime="2025-12-11 10:35:55.167487072 +0000 UTC m=+71.986046641" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.175803 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" event={"ID":"86f2da10-45a8-4cc4-9100-3e909d78274f","Type":"ContainerStarted","Data":"03a32ea0722f68414eb6d16e53a169507b830cb5ceec40702ee64288ad3104a6"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.179264 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2" event={"ID":"1cc10407-5264-4bb5-8223-3ea9a4551c29","Type":"ContainerStarted","Data":"2a5a26ace5f09078bdd450d8f268ed8b56d2effa0fc46421edd6e216cacc01ba"} Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.201706 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-tn6f4" podStartSLOduration=45.2016872 podStartE2EDuration="45.2016872s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:55.201617358 +0000 UTC m=+72.020176937" watchObservedRunningTime="2025-12-11 10:35:55.2016872 +0000 UTC m=+72.020246769" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.248114 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-sg9hf" podStartSLOduration=14.248094934 podStartE2EDuration="14.248094934s" podCreationTimestamp="2025-12-11 10:35:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:55.245784336 +0000 UTC m=+72.064343915" watchObservedRunningTime="2025-12-11 10:35:55.248094934 +0000 UTC m=+72.066654503" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.300725 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure 
output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:35:55 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:35:55 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:35:55 crc kubenswrapper[5016]: healthz check failed Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.300785 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.312461 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" podStartSLOduration=45.312441378 podStartE2EDuration="45.312441378s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:55.30932943 +0000 UTC m=+72.127889019" watchObservedRunningTime="2025-12-11 10:35:55.312441378 +0000 UTC m=+72.131000957" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.350475 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" podStartSLOduration=46.350456391 podStartE2EDuration="46.350456391s" podCreationTimestamp="2025-12-11 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:55.336825749 +0000 UTC m=+72.155385338" watchObservedRunningTime="2025-12-11 10:35:55.350456391 +0000 UTC m=+72.169015970" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.456968 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-2b7gh" podStartSLOduration=45.456922531000004 podStartE2EDuration="45.456922531s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:55.452862739 +0000 UTC m=+72.271422348" watchObservedRunningTime="2025-12-11 10:35:55.456922531 +0000 UTC m=+72.275482110" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.464670 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tp5lv"] Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.469097 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.474347 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.495693 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7242e8c3-6ed6-4613-8fc9-1339be494e56-utilities\") pod \"certified-operators-tp5lv\" (UID: \"7242e8c3-6ed6-4613-8fc9-1339be494e56\") " pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.495735 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7242e8c3-6ed6-4613-8fc9-1339be494e56-catalog-content\") pod \"certified-operators-tp5lv\" (UID: \"7242e8c3-6ed6-4613-8fc9-1339be494e56\") " pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.495756 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tm8r2\" (UniqueName: \"kubernetes.io/projected/7242e8c3-6ed6-4613-8fc9-1339be494e56-kube-api-access-tm8r2\") pod \"certified-operators-tp5lv\" (UID: \"7242e8c3-6ed6-4613-8fc9-1339be494e56\") " pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.564704 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-c5sb2" podStartSLOduration=45.564678593 podStartE2EDuration="45.564678593s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:55.510373351 +0000 UTC m=+72.328932940" watchObservedRunningTime="2025-12-11 10:35:55.564678593 +0000 UTC m=+72.383238162" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.606871 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7242e8c3-6ed6-4613-8fc9-1339be494e56-utilities\") pod \"certified-operators-tp5lv\" (UID: \"7242e8c3-6ed6-4613-8fc9-1339be494e56\") " pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.606926 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7242e8c3-6ed6-4613-8fc9-1339be494e56-catalog-content\") pod \"certified-operators-tp5lv\" (UID: \"7242e8c3-6ed6-4613-8fc9-1339be494e56\") " pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.606970 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tm8r2\" (UniqueName: \"kubernetes.io/projected/7242e8c3-6ed6-4613-8fc9-1339be494e56-kube-api-access-tm8r2\") pod \"certified-operators-tp5lv\" (UID: \"7242e8c3-6ed6-4613-8fc9-1339be494e56\") " pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.607831 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7242e8c3-6ed6-4613-8fc9-1339be494e56-utilities\") pod 
\"certified-operators-tp5lv\" (UID: \"7242e8c3-6ed6-4613-8fc9-1339be494e56\") " pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.607884 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7242e8c3-6ed6-4613-8fc9-1339be494e56-catalog-content\") pod \"certified-operators-tp5lv\" (UID: \"7242e8c3-6ed6-4613-8fc9-1339be494e56\") " pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.631475 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tp5lv"] Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.631531 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-c6sdb"] Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.633202 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.642676 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c6sdb"] Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.642964 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.713931 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tm8r2\" (UniqueName: \"kubernetes.io/projected/7242e8c3-6ed6-4613-8fc9-1339be494e56-kube-api-access-tm8r2\") pod \"certified-operators-tp5lv\" (UID: \"7242e8c3-6ed6-4613-8fc9-1339be494e56\") " pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.715118 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-utilities\") pod \"community-operators-c6sdb\" (UID: \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\") " pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.715164 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-catalog-content\") pod \"community-operators-c6sdb\" (UID: \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\") " pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.715215 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6n2x\" (UniqueName: \"kubernetes.io/projected/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-kube-api-access-g6n2x\") pod \"community-operators-c6sdb\" (UID: \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\") " pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.793042 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fc9qn"] Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.798926 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.813567 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.816091 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-utilities\") pod \"community-operators-c6sdb\" (UID: \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\") " pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.816137 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-catalog-content\") pod \"community-operators-c6sdb\" (UID: \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\") " pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.816189 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6n2x\" (UniqueName: \"kubernetes.io/projected/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-kube-api-access-g6n2x\") pod \"community-operators-c6sdb\" (UID: \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\") " pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.817285 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-utilities\") pod \"community-operators-c6sdb\" (UID: \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\") " pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.817653 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-catalog-content\") pod \"community-operators-c6sdb\" (UID: \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\") " pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.824009 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fc9qn"] Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.876917 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6n2x\" (UniqueName: \"kubernetes.io/projected/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-kube-api-access-g6n2x\") pod \"community-operators-c6sdb\" (UID: \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\") " pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.918059 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-catalog-content\") pod \"certified-operators-fc9qn\" (UID: \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\") " pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.918141 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-utilities\") pod \"certified-operators-fc9qn\" (UID: \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\") " pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.918171 5016 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7m5zn\" (UniqueName: \"kubernetes.io/projected/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-kube-api-access-7m5zn\") pod \"certified-operators-fc9qn\" (UID: \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\") " pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:35:55 crc kubenswrapper[5016]: I1211 10:35:55.978833 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.019145 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-catalog-content\") pod \"certified-operators-fc9qn\" (UID: \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\") " pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.019228 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-utilities\") pod \"certified-operators-fc9qn\" (UID: \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\") " pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.019258 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7m5zn\" (UniqueName: \"kubernetes.io/projected/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-kube-api-access-7m5zn\") pod \"certified-operators-fc9qn\" (UID: \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\") " pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.020453 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-catalog-content\") pod \"certified-operators-fc9qn\" (UID: \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\") " pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.020693 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-utilities\") pod \"certified-operators-fc9qn\" (UID: \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\") " pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.040113 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-r5rgf"] Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.041178 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.045173 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7m5zn\" (UniqueName: \"kubernetes.io/projected/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-kube-api-access-7m5zn\") pod \"certified-operators-fc9qn\" (UID: \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\") " pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.086691 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r5rgf"] Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.123223 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.223838 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/623ddc04-83e2-42ac-bcac-59b72d2fac2a-utilities\") pod \"community-operators-r5rgf\" (UID: \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\") " pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.224204 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctk8w\" (UniqueName: \"kubernetes.io/projected/623ddc04-83e2-42ac-bcac-59b72d2fac2a-kube-api-access-ctk8w\") pod \"community-operators-r5rgf\" (UID: \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\") " pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.224273 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/623ddc04-83e2-42ac-bcac-59b72d2fac2a-catalog-content\") pod \"community-operators-r5rgf\" (UID: \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\") " pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.227619 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" event={"ID":"c22ea3a4-ad05-4dde-9cc8-0a0365d225a6","Type":"ContainerStarted","Data":"59f5994a00f780725b3e84bb1f0b1d465d1741505a12c7509b7d8b82e64d2391"} Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.228472 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.290086 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" podStartSLOduration=47.290069555 podStartE2EDuration="47.290069555s" podCreationTimestamp="2025-12-11 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:56.287984653 +0000 UTC m=+73.106544252" watchObservedRunningTime="2025-12-11 10:35:56.290069555 +0000 UTC m=+73.108629134" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.296009 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-8f46b" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.303545 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:35:56 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:35:56 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:35:56 crc kubenswrapper[5016]: healthz check failed Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.303617 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.323521 5016 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-58t4g" podStartSLOduration=46.323505464 podStartE2EDuration="46.323505464s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:56.320787346 +0000 UTC m=+73.139346925" watchObservedRunningTime="2025-12-11 10:35:56.323505464 +0000 UTC m=+73.142065053" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.324904 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/623ddc04-83e2-42ac-bcac-59b72d2fac2a-catalog-content\") pod \"community-operators-r5rgf\" (UID: \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\") " pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.325017 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/623ddc04-83e2-42ac-bcac-59b72d2fac2a-utilities\") pod \"community-operators-r5rgf\" (UID: \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\") " pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.325066 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctk8w\" (UniqueName: \"kubernetes.io/projected/623ddc04-83e2-42ac-bcac-59b72d2fac2a-kube-api-access-ctk8w\") pod \"community-operators-r5rgf\" (UID: \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\") " pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.325590 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/623ddc04-83e2-42ac-bcac-59b72d2fac2a-catalog-content\") pod \"community-operators-r5rgf\" (UID: \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\") " pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.325731 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/623ddc04-83e2-42ac-bcac-59b72d2fac2a-utilities\") pod \"community-operators-r5rgf\" (UID: \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\") " pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.363424 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctk8w\" (UniqueName: \"kubernetes.io/projected/623ddc04-83e2-42ac-bcac-59b72d2fac2a-kube-api-access-ctk8w\") pod \"community-operators-r5rgf\" (UID: \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\") " pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.376369 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.379044 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.379097 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.379107 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.379153 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.399527 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-jbqpn" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.413608 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" podStartSLOduration=46.413589373 podStartE2EDuration="46.413589373s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:56.412140027 +0000 UTC m=+73.230699606" watchObservedRunningTime="2025-12-11 10:35:56.413589373 +0000 UTC m=+73.232148952" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.414291 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm" podStartSLOduration=46.414285051 podStartE2EDuration="46.414285051s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:56.3827769 +0000 UTC m=+73.201336479" watchObservedRunningTime="2025-12-11 10:35:56.414285051 +0000 UTC m=+73.232844620" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.417415 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.418983 5016 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-kp5bk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body= Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.419012 5016 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-kp5bk 
Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.419035 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused"
Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.419043 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused"
Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.419270 5016 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-kp5bk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body=
Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.419294 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused"
Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.500322 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" podStartSLOduration=46.500296187000004 podStartE2EDuration="46.500296187s" podCreationTimestamp="2025-12-11 10:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:56.469126066 +0000 UTC m=+73.287685655" watchObservedRunningTime="2025-12-11 10:35:56.500296187 +0000 UTC m=+73.318855786"
Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.568055 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-jpxgn"
Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.568343 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-jpxgn"
Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.600827 5016 patch_prober.go:28] interesting pod/console-f9d7485db-jpxgn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body=
Dec 11 10:35:56 crc kubenswrapper[5016]: I1211 10:35:56.601351 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-jpxgn" podUID="fa3166f9-577e-4994-9290-7ced66d69dcc" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused"
Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.054093 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c6sdb"]
pods=["openshift-marketplace/community-operators-c6sdb"] Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.074427 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tp5lv"] Dec 11 10:35:57 crc kubenswrapper[5016]: W1211 10:35:57.107386 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7242e8c3_6ed6_4613_8fc9_1339be494e56.slice/crio-8d609d97af6c76a71e621a36eb0291d626e442f43edaccf9915e8184aa7d664e WatchSource:0}: Error finding container 8d609d97af6c76a71e621a36eb0291d626e442f43edaccf9915e8184aa7d664e: Status 404 returned error can't find the container with id 8d609d97af6c76a71e621a36eb0291d626e442f43edaccf9915e8184aa7d664e Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.289150 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r5rgf"] Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.299009 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c6sdb" event={"ID":"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7","Type":"ContainerStarted","Data":"4a93dbeda8cea431290536d59a21661e49598f9b7708a5e611ae319bf2757f73"} Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.302983 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:35:57 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:35:57 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:35:57 crc kubenswrapper[5016]: healthz check failed Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.303029 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.311203 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-v2qvr" event={"ID":"d29d8609-2309-45b2-abc7-b4e10ae27eeb","Type":"ContainerStarted","Data":"33aba8d3c80aff535dc486c39f28f5feaf0f91fcf5cf5e8dd99683e64801e1b6"} Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.313035 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tp5lv" event={"ID":"7242e8c3-6ed6-4613-8fc9-1339be494e56","Type":"ContainerStarted","Data":"8d609d97af6c76a71e621a36eb0291d626e442f43edaccf9915e8184aa7d664e"} Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.513890 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fc9qn"] Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.523626 5016 patch_prober.go:28] interesting pod/console-operator-58897d9998-7rm8d container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.523681 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-7rm8d" podUID="d20858ea-54b5-474f-bdd9-40eb83d42e57" 
containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.584189 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hqmxw"] Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.585576 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.597448 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hqmxw"] Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.598156 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.664113 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc7ml\" (UniqueName: \"kubernetes.io/projected/f393088a-dacc-4673-8074-d6be25842a84-kube-api-access-gc7ml\") pod \"redhat-marketplace-hqmxw\" (UID: \"f393088a-dacc-4673-8074-d6be25842a84\") " pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.664163 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f393088a-dacc-4673-8074-d6be25842a84-utilities\") pod \"redhat-marketplace-hqmxw\" (UID: \"f393088a-dacc-4673-8074-d6be25842a84\") " pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.664363 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f393088a-dacc-4673-8074-d6be25842a84-catalog-content\") pod \"redhat-marketplace-hqmxw\" (UID: \"f393088a-dacc-4673-8074-d6be25842a84\") " pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.765760 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f393088a-dacc-4673-8074-d6be25842a84-catalog-content\") pod \"redhat-marketplace-hqmxw\" (UID: \"f393088a-dacc-4673-8074-d6be25842a84\") " pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.765868 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc7ml\" (UniqueName: \"kubernetes.io/projected/f393088a-dacc-4673-8074-d6be25842a84-kube-api-access-gc7ml\") pod \"redhat-marketplace-hqmxw\" (UID: \"f393088a-dacc-4673-8074-d6be25842a84\") " pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.765900 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f393088a-dacc-4673-8074-d6be25842a84-utilities\") pod \"redhat-marketplace-hqmxw\" (UID: \"f393088a-dacc-4673-8074-d6be25842a84\") " pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.766358 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/f393088a-dacc-4673-8074-d6be25842a84-catalog-content\") pod \"redhat-marketplace-hqmxw\" (UID: \"f393088a-dacc-4673-8074-d6be25842a84\") " pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.766423 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f393088a-dacc-4673-8074-d6be25842a84-utilities\") pod \"redhat-marketplace-hqmxw\" (UID: \"f393088a-dacc-4673-8074-d6be25842a84\") " pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.787039 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc7ml\" (UniqueName: \"kubernetes.io/projected/f393088a-dacc-4673-8074-d6be25842a84-kube-api-access-gc7ml\") pod \"redhat-marketplace-hqmxw\" (UID: \"f393088a-dacc-4673-8074-d6be25842a84\") " pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.917035 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.956078 5016 patch_prober.go:28] interesting pod/console-operator-58897d9998-7rm8d container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.956171 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-7rm8d" podUID="d20858ea-54b5-474f-bdd9-40eb83d42e57" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 11 10:35:57 crc kubenswrapper[5016]: I1211 10:35:57.999128 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vb25j"] Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.000449 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.172060 5016 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-qldpr container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body= Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.172402 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" podUID="fae98298-ad95-4355-9f4c-0f1c159cb0f9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.172067 5016 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-qldpr container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body= Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.172522 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" podUID="fae98298-ad95-4355-9f4c-0f1c159cb0f9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.172670 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwsjx\" (UniqueName: \"kubernetes.io/projected/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-kube-api-access-nwsjx\") pod \"redhat-marketplace-vb25j\" (UID: \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\") " pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.172705 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-utilities\") pod \"redhat-marketplace-vb25j\" (UID: \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\") " pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.172741 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-catalog-content\") pod \"redhat-marketplace-vb25j\" (UID: \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\") " pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.273888 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwsjx\" (UniqueName: \"kubernetes.io/projected/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-kube-api-access-nwsjx\") pod \"redhat-marketplace-vb25j\" (UID: \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\") " pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.273991 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-utilities\") pod 
\"redhat-marketplace-vb25j\" (UID: \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\") " pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.274037 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-catalog-content\") pod \"redhat-marketplace-vb25j\" (UID: \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\") " pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.274997 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-catalog-content\") pod \"redhat-marketplace-vb25j\" (UID: \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\") " pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.275101 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-utilities\") pod \"redhat-marketplace-vb25j\" (UID: \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\") " pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.298806 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:35:58 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:35:58 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:35:58 crc kubenswrapper[5016]: healthz check failed Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.298873 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.319288 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r5rgf" event={"ID":"623ddc04-83e2-42ac-bcac-59b72d2fac2a","Type":"ContainerStarted","Data":"927c291325ed36a7a29e3781145edeb3363d491148d73f8f1b4aae3e43303023"} Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.320425 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc9qn" event={"ID":"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4","Type":"ContainerStarted","Data":"fce4f97e5cfa2867c9b7f8677c1d308cafd11e58a1bad0e63c79d1d3bea75cbf"} Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.649061 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vb25j"] Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.788791 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwsjx\" (UniqueName: \"kubernetes.io/projected/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-kube-api-access-nwsjx\") pod \"redhat-marketplace-vb25j\" (UID: \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\") " pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.834586 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hqmxw"] Dec 11 10:35:58 crc kubenswrapper[5016]: 
I1211 10:35:58.881548 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rx8bv"] Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.883157 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:35:58 crc kubenswrapper[5016]: W1211 10:35:58.887253 5016 reflector.go:561] object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh": failed to list *v1.Secret: secrets "redhat-operators-dockercfg-ct8rh" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-marketplace": no relationship found between node 'crc' and this object Dec 11 10:35:58 crc kubenswrapper[5016]: E1211 10:35:58.887313 5016 reflector.go:158] "Unhandled Error" err="object-\"openshift-marketplace\"/\"redhat-operators-dockercfg-ct8rh\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"redhat-operators-dockercfg-ct8rh\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-marketplace\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.909104 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-v2qvr" podStartSLOduration=49.909088368 podStartE2EDuration="49.909088368s" podCreationTimestamp="2025-12-11 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:35:58.907439647 +0000 UTC m=+75.725999226" watchObservedRunningTime="2025-12-11 10:35:58.909088368 +0000 UTC m=+75.727647947" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.916280 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.988603 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f07c0be-3ff2-4b4a-86f1-67da5394f101-catalog-content\") pod \"redhat-operators-rx8bv\" (UID: \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\") " pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.988672 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f07c0be-3ff2-4b4a-86f1-67da5394f101-utilities\") pod \"redhat-operators-rx8bv\" (UID: \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\") " pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:35:58 crc kubenswrapper[5016]: I1211 10:35:58.988689 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4jhp\" (UniqueName: \"kubernetes.io/projected/2f07c0be-3ff2-4b4a-86f1-67da5394f101-kube-api-access-n4jhp\") pod \"redhat-operators-rx8bv\" (UID: \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\") " pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:35:59 crc kubenswrapper[5016]: W1211 10:35:59.014551 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf393088a_dacc_4673_8074_d6be25842a84.slice/crio-307bb5b083621eb15ca65bac079066523d83b4989fd45c9448ed08fb99078fbd WatchSource:0}: Error finding container 307bb5b083621eb15ca65bac079066523d83b4989fd45c9448ed08fb99078fbd: Status 404 returned error can't find the container with id 307bb5b083621eb15ca65bac079066523d83b4989fd45c9448ed08fb99078fbd Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.089591 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f07c0be-3ff2-4b4a-86f1-67da5394f101-catalog-content\") pod \"redhat-operators-rx8bv\" (UID: \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\") " pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.089668 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f07c0be-3ff2-4b4a-86f1-67da5394f101-utilities\") pod \"redhat-operators-rx8bv\" (UID: \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\") " pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.089696 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4jhp\" (UniqueName: \"kubernetes.io/projected/2f07c0be-3ff2-4b4a-86f1-67da5394f101-kube-api-access-n4jhp\") pod \"redhat-operators-rx8bv\" (UID: \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\") " pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.100328 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f07c0be-3ff2-4b4a-86f1-67da5394f101-utilities\") pod \"redhat-operators-rx8bv\" (UID: \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\") " pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.100912 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/2f07c0be-3ff2-4b4a-86f1-67da5394f101-catalog-content\") pod \"redhat-operators-rx8bv\" (UID: \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\") " pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.107725 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rx8bv"] Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.141364 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.142146 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.146417 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.146624 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.172279 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4jhp\" (UniqueName: \"kubernetes.io/projected/2f07c0be-3ff2-4b4a-86f1-67da5394f101-kube-api-access-n4jhp\") pod \"redhat-operators-rx8bv\" (UID: \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\") " pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.184116 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.205781 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rzcjf"] Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.206770 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.269481 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rzcjf"] Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.292087 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a586cf38-b0a7-48ae-9563-8001393b7540-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a586cf38-b0a7-48ae-9563-8001393b7540\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.292191 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqbf7\" (UniqueName: \"kubernetes.io/projected/89fda315-d1f2-484a-aa91-ec75f0b0227e-kube-api-access-xqbf7\") pod \"redhat-operators-rzcjf\" (UID: \"89fda315-d1f2-484a-aa91-ec75f0b0227e\") " pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.292256 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89fda315-d1f2-484a-aa91-ec75f0b0227e-catalog-content\") pod \"redhat-operators-rzcjf\" (UID: \"89fda315-d1f2-484a-aa91-ec75f0b0227e\") " pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.292314 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a586cf38-b0a7-48ae-9563-8001393b7540-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a586cf38-b0a7-48ae-9563-8001393b7540\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.292341 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89fda315-d1f2-484a-aa91-ec75f0b0227e-utilities\") pod \"redhat-operators-rzcjf\" (UID: \"89fda315-d1f2-484a-aa91-ec75f0b0227e\") " pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.301619 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:35:59 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:35:59 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:35:59 crc kubenswrapper[5016]: healthz check failed Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.301672 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.341892 5016 generic.go:334] "Generic (PLEG): container finished" podID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" containerID="60b6b08c0d83f6b3f2b5afa6ab72aee259cd39dfcbf1e63c535917fd9155e570" exitCode=0 Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.342182 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-c6sdb" event={"ID":"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7","Type":"ContainerDied","Data":"60b6b08c0d83f6b3f2b5afa6ab72aee259cd39dfcbf1e63c535917fd9155e570"} Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.344849 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.350443 5016 generic.go:334] "Generic (PLEG): container finished" podID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" containerID="bc00277dce91936233ab5a9a197e44358d060fb7dfca16dce668703e745492a5" exitCode=0 Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.350502 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc9qn" event={"ID":"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4","Type":"ContainerDied","Data":"bc00277dce91936233ab5a9a197e44358d060fb7dfca16dce668703e745492a5"} Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.361324 5016 generic.go:334] "Generic (PLEG): container finished" podID="67dd196e-3271-4222-aa21-dfaf3278eee0" containerID="a935f10f6a83a521998e85aff4db8e97b019c18856bc6745278ee7decbad7bcf" exitCode=0 Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.361410 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" event={"ID":"67dd196e-3271-4222-aa21-dfaf3278eee0","Type":"ContainerDied","Data":"a935f10f6a83a521998e85aff4db8e97b019c18856bc6745278ee7decbad7bcf"} Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.366523 5016 generic.go:334] "Generic (PLEG): container finished" podID="7242e8c3-6ed6-4613-8fc9-1339be494e56" containerID="7748e0cb21700e427bc30f167c4e4654d84bf11b31376fa6c0fcb57f3df3a33a" exitCode=0 Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.366611 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tp5lv" event={"ID":"7242e8c3-6ed6-4613-8fc9-1339be494e56","Type":"ContainerDied","Data":"7748e0cb21700e427bc30f167c4e4654d84bf11b31376fa6c0fcb57f3df3a33a"} Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.377883 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hqmxw" event={"ID":"f393088a-dacc-4673-8074-d6be25842a84","Type":"ContainerStarted","Data":"307bb5b083621eb15ca65bac079066523d83b4989fd45c9448ed08fb99078fbd"} Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.393276 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a586cf38-b0a7-48ae-9563-8001393b7540-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a586cf38-b0a7-48ae-9563-8001393b7540\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.393313 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89fda315-d1f2-484a-aa91-ec75f0b0227e-utilities\") pod \"redhat-operators-rzcjf\" (UID: \"89fda315-d1f2-484a-aa91-ec75f0b0227e\") " pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.393344 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a586cf38-b0a7-48ae-9563-8001393b7540-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a586cf38-b0a7-48ae-9563-8001393b7540\") " 
pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.393389 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqbf7\" (UniqueName: \"kubernetes.io/projected/89fda315-d1f2-484a-aa91-ec75f0b0227e-kube-api-access-xqbf7\") pod \"redhat-operators-rzcjf\" (UID: \"89fda315-d1f2-484a-aa91-ec75f0b0227e\") " pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.393429 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89fda315-d1f2-484a-aa91-ec75f0b0227e-catalog-content\") pod \"redhat-operators-rzcjf\" (UID: \"89fda315-d1f2-484a-aa91-ec75f0b0227e\") " pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.393710 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a586cf38-b0a7-48ae-9563-8001393b7540-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a586cf38-b0a7-48ae-9563-8001393b7540\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.394090 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89fda315-d1f2-484a-aa91-ec75f0b0227e-catalog-content\") pod \"redhat-operators-rzcjf\" (UID: \"89fda315-d1f2-484a-aa91-ec75f0b0227e\") " pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.394324 5016 generic.go:334] "Generic (PLEG): container finished" podID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" containerID="6d835a48c6d414c9097a434358701ace6f08f0bc9d08c01c978ed8c4201befb3" exitCode=0 Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.394385 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r5rgf" event={"ID":"623ddc04-83e2-42ac-bcac-59b72d2fac2a","Type":"ContainerDied","Data":"6d835a48c6d414c9097a434358701ace6f08f0bc9d08c01c978ed8c4201befb3"} Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.396609 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89fda315-d1f2-484a-aa91-ec75f0b0227e-utilities\") pod \"redhat-operators-rzcjf\" (UID: \"89fda315-d1f2-484a-aa91-ec75f0b0227e\") " pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.431620 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a586cf38-b0a7-48ae-9563-8001393b7540-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a586cf38-b0a7-48ae-9563-8001393b7540\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.433724 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqbf7\" (UniqueName: \"kubernetes.io/projected/89fda315-d1f2-484a-aa91-ec75f0b0227e-kube-api-access-xqbf7\") pod \"redhat-operators-rzcjf\" (UID: \"89fda315-d1f2-484a-aa91-ec75f0b0227e\") " pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.462393 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.860409 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vb25j"] Dec 11 10:35:59 crc kubenswrapper[5016]: I1211 10:35:59.926887 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 11 10:36:00 crc kubenswrapper[5016]: I1211 10:36:00.298799 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:36:00 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:36:00 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:36:00 crc kubenswrapper[5016]: healthz check failed Dec 11 10:36:00 crc kubenswrapper[5016]: I1211 10:36:00.298861 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:36:00 crc kubenswrapper[5016]: I1211 10:36:00.316040 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 11 10:36:00 crc kubenswrapper[5016]: I1211 10:36:00.318402 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:36:00 crc kubenswrapper[5016]: I1211 10:36:00.322348 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:36:00 crc kubenswrapper[5016]: I1211 10:36:00.574408 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hqmxw" event={"ID":"f393088a-dacc-4673-8074-d6be25842a84","Type":"ContainerStarted","Data":"29167e1f7fdca5acf9740a43d23a4cb85769ad7d1f5e6fcbe0f80600e46fd422"} Dec 11 10:36:00 crc kubenswrapper[5016]: I1211 10:36:00.576071 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a586cf38-b0a7-48ae-9563-8001393b7540","Type":"ContainerStarted","Data":"d6d05dfb7c6ec18a4cc8fb67be773a5f63bcc1878797417b9028cb7c44990fca"} Dec 11 10:36:00 crc kubenswrapper[5016]: I1211 10:36:00.585839 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vb25j" event={"ID":"edb91373-b8a5-4426-9a6b-1fbb6c9f2846","Type":"ContainerStarted","Data":"f74cbaf4b2b284ec3a59de230d87650b678e7c27a1722e13358d539c64c19d8e"} Dec 11 10:36:00 crc kubenswrapper[5016]: I1211 10:36:00.932378 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rx8bv"] Dec 11 10:36:00 crc kubenswrapper[5016]: W1211 10:36:00.943588 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2f07c0be_3ff2_4b4a_86f1_67da5394f101.slice/crio-c51e5ecbbd36a9a56f608cc8c02fa75beb78930580e8d790fb98a6702442a885 WatchSource:0}: Error finding container c51e5ecbbd36a9a56f608cc8c02fa75beb78930580e8d790fb98a6702442a885: Status 404 returned error can't find the container with id c51e5ecbbd36a9a56f608cc8c02fa75beb78930580e8d790fb98a6702442a885 Dec 11 10:36:01 crc 
kubenswrapper[5016]: I1211 10:36:01.001135 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rzcjf"] Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.057367 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.126268 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.126323 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.130318 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67dd196e-3271-4222-aa21-dfaf3278eee0-config-volume\") pod \"67dd196e-3271-4222-aa21-dfaf3278eee0\" (UID: \"67dd196e-3271-4222-aa21-dfaf3278eee0\") " Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.130378 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb596\" (UniqueName: \"kubernetes.io/projected/67dd196e-3271-4222-aa21-dfaf3278eee0-kube-api-access-sb596\") pod \"67dd196e-3271-4222-aa21-dfaf3278eee0\" (UID: \"67dd196e-3271-4222-aa21-dfaf3278eee0\") " Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.130419 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67dd196e-3271-4222-aa21-dfaf3278eee0-secret-volume\") pod \"67dd196e-3271-4222-aa21-dfaf3278eee0\" (UID: \"67dd196e-3271-4222-aa21-dfaf3278eee0\") " Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.131461 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67dd196e-3271-4222-aa21-dfaf3278eee0-config-volume" (OuterVolumeSpecName: "config-volume") pod "67dd196e-3271-4222-aa21-dfaf3278eee0" (UID: "67dd196e-3271-4222-aa21-dfaf3278eee0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.140420 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67dd196e-3271-4222-aa21-dfaf3278eee0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "67dd196e-3271-4222-aa21-dfaf3278eee0" (UID: "67dd196e-3271-4222-aa21-dfaf3278eee0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.154172 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.154564 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.155927 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67dd196e-3271-4222-aa21-dfaf3278eee0-kube-api-access-sb596" (OuterVolumeSpecName: "kube-api-access-sb596") pod "67dd196e-3271-4222-aa21-dfaf3278eee0" (UID: "67dd196e-3271-4222-aa21-dfaf3278eee0"). InnerVolumeSpecName "kube-api-access-sb596". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.232196 5016 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67dd196e-3271-4222-aa21-dfaf3278eee0-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.232233 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb596\" (UniqueName: \"kubernetes.io/projected/67dd196e-3271-4222-aa21-dfaf3278eee0-kube-api-access-sb596\") on node \"crc\" DevicePath \"\"" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.232280 5016 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67dd196e-3271-4222-aa21-dfaf3278eee0-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.301548 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:36:01 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:36:01 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:36:01 crc kubenswrapper[5016]: healthz check failed Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.301601 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.340973 5016 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-v75dd container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Dec 11 10:36:01 crc kubenswrapper[5016]: [+]log ok Dec 11 10:36:01 crc kubenswrapper[5016]: [+]etcd ok Dec 11 10:36:01 crc kubenswrapper[5016]: [+]poststarthook/start-apiserver-admission-initializer ok Dec 11 10:36:01 crc kubenswrapper[5016]: [-]poststarthook/generic-apiserver-start-informers failed: reason withheld Dec 11 10:36:01 crc kubenswrapper[5016]: [+]poststarthook/max-in-flight-filter ok Dec 11 10:36:01 crc kubenswrapper[5016]: [+]poststarthook/storage-object-count-tracker-hook ok Dec 11 10:36:01 crc kubenswrapper[5016]: [+]poststarthook/openshift.io-StartUserInformer ok Dec 11 10:36:01 crc kubenswrapper[5016]: [+]poststarthook/openshift.io-StartOAuthInformer ok Dec 11 10:36:01 crc kubenswrapper[5016]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok Dec 11 10:36:01 crc kubenswrapper[5016]: livez check failed Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.341022 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" podUID="974b6a63-5953-4683-8909-20b4a93856b1" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.459296 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 11 10:36:01 crc kubenswrapper[5016]: E1211 10:36:01.459577 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67dd196e-3271-4222-aa21-dfaf3278eee0" containerName="collect-profiles" Dec 11 10:36:01 crc 
kubenswrapper[5016]: I1211 10:36:01.459597 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="67dd196e-3271-4222-aa21-dfaf3278eee0" containerName="collect-profiles" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.459735 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="67dd196e-3271-4222-aa21-dfaf3278eee0" containerName="collect-profiles" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.461606 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.464357 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.468174 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.483538 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.536272 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/68ee6f31-89d2-46b9-8617-646c6b7d6fed-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"68ee6f31-89d2-46b9-8617-646c6b7d6fed\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.536316 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/68ee6f31-89d2-46b9-8617-646c6b7d6fed-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"68ee6f31-89d2-46b9-8617-646c6b7d6fed\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.590865 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcjf" event={"ID":"89fda315-d1f2-484a-aa91-ec75f0b0227e","Type":"ContainerStarted","Data":"d63b4a95dba1ff0eef97f235c276d3dbb696c39120318f9a03258442070a990c"} Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.592622 5016 generic.go:334] "Generic (PLEG): container finished" podID="f393088a-dacc-4673-8074-d6be25842a84" containerID="29167e1f7fdca5acf9740a43d23a4cb85769ad7d1f5e6fcbe0f80600e46fd422" exitCode=0 Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.592676 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hqmxw" event={"ID":"f393088a-dacc-4673-8074-d6be25842a84","Type":"ContainerDied","Data":"29167e1f7fdca5acf9740a43d23a4cb85769ad7d1f5e6fcbe0f80600e46fd422"} Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.593906 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rx8bv" event={"ID":"2f07c0be-3ff2-4b4a-86f1-67da5394f101","Type":"ContainerStarted","Data":"c51e5ecbbd36a9a56f608cc8c02fa75beb78930580e8d790fb98a6702442a885"} Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.597061 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" event={"ID":"67dd196e-3271-4222-aa21-dfaf3278eee0","Type":"ContainerDied","Data":"5358e8083144177c466c2c9f1f78848b5305a31168b3c6498ee73d9426bbd12b"} Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.597133 5016 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5358e8083144177c466c2c9f1f78848b5305a31168b3c6498ee73d9426bbd12b" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.597143 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.637717 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/68ee6f31-89d2-46b9-8617-646c6b7d6fed-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"68ee6f31-89d2-46b9-8617-646c6b7d6fed\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.637772 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/68ee6f31-89d2-46b9-8617-646c6b7d6fed-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"68ee6f31-89d2-46b9-8617-646c6b7d6fed\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.637851 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/68ee6f31-89d2-46b9-8617-646c6b7d6fed-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"68ee6f31-89d2-46b9-8617-646c6b7d6fed\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.656269 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/68ee6f31-89d2-46b9-8617-646c6b7d6fed-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"68ee6f31-89d2-46b9-8617-646c6b7d6fed\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 10:36:01 crc kubenswrapper[5016]: I1211 10:36:01.786107 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 11 10:36:02 crc kubenswrapper[5016]: I1211 10:36:02.189128 5016 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-qldpr container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.16:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 10:36:02 crc kubenswrapper[5016]: I1211 10:36:02.189457 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" podUID="fae98298-ad95-4355-9f4c-0f1c159cb0f9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.16:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 11 10:36:02 crc kubenswrapper[5016]: I1211 10:36:02.195050 5016 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-qldpr container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.16:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 11 10:36:02 crc kubenswrapper[5016]: I1211 10:36:02.195148 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" podUID="fae98298-ad95-4355-9f4c-0f1c159cb0f9" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.16:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 11 10:36:02 crc kubenswrapper[5016]: I1211 10:36:02.281518 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:36:02 crc kubenswrapper[5016]: I1211 10:36:02.298817 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:36:02 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:36:02 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:36:02 crc kubenswrapper[5016]: healthz check failed Dec 11 10:36:02 crc kubenswrapper[5016]: I1211 10:36:02.298881 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:36:03 crc kubenswrapper[5016]: I1211 10:36:03.190353 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 11 10:36:03 crc kubenswrapper[5016]: I1211 10:36:03.306480 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:36:03 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:36:03 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:36:03 crc kubenswrapper[5016]: healthz check 
failed Dec 11 10:36:03 crc kubenswrapper[5016]: I1211 10:36:03.306537 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:36:03 crc kubenswrapper[5016]: I1211 10:36:03.607466 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"68ee6f31-89d2-46b9-8617-646c6b7d6fed","Type":"ContainerStarted","Data":"da5ca759f73e0fffa68cce9a1d470dc3b9a2d4dea45ae31c6de65bde47fd60bf"} Dec 11 10:36:04 crc kubenswrapper[5016]: I1211 10:36:04.181831 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qldpr" Dec 11 10:36:04 crc kubenswrapper[5016]: I1211 10:36:04.297751 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:36:04 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:36:04 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:36:04 crc kubenswrapper[5016]: healthz check failed Dec 11 10:36:04 crc kubenswrapper[5016]: I1211 10:36:04.297802 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:36:04 crc kubenswrapper[5016]: E1211 10:36:04.443362 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"] Dec 11 10:36:04 crc kubenswrapper[5016]: E1211 10:36:04.444766 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"] Dec 11 10:36:04 crc kubenswrapper[5016]: E1211 10:36:04.445831 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"] Dec 11 10:36:04 crc kubenswrapper[5016]: E1211 10:36:04.445978 5016 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" podUID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" containerName="kube-multus-additional-cni-plugins" Dec 11 10:36:05 crc kubenswrapper[5016]: I1211 10:36:05.301109 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: 
reason withheld Dec 11 10:36:05 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:36:05 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:36:05 crc kubenswrapper[5016]: healthz check failed Dec 11 10:36:05 crc kubenswrapper[5016]: I1211 10:36:05.301170 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:36:06 crc kubenswrapper[5016]: I1211 10:36:06.134357 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:36:06 crc kubenswrapper[5016]: I1211 10:36:06.140723 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-v75dd" Dec 11 10:36:06 crc kubenswrapper[5016]: I1211 10:36:06.297570 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:36:06 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:36:06 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:36:06 crc kubenswrapper[5016]: healthz check failed Dec 11 10:36:06 crc kubenswrapper[5016]: I1211 10:36:06.297626 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:36:06 crc kubenswrapper[5016]: I1211 10:36:06.380633 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:36:06 crc kubenswrapper[5016]: I1211 10:36:06.380878 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:36:06 crc kubenswrapper[5016]: I1211 10:36:06.380633 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:36:06 crc kubenswrapper[5016]: I1211 10:36:06.380927 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:36:06 crc kubenswrapper[5016]: I1211 10:36:06.423324 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:36:06 crc kubenswrapper[5016]: I1211 10:36:06.562998 5016 patch_prober.go:28] interesting pod/console-f9d7485db-jpxgn container/console namespace/openshift-console: Startup probe 
status=failure output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 11 10:36:06 crc kubenswrapper[5016]: I1211 10:36:06.563060 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-jpxgn" podUID="fa3166f9-577e-4994-9290-7ced66d69dcc" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 11 10:36:06 crc kubenswrapper[5016]: I1211 10:36:06.941652 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-7rm8d" Dec 11 10:36:07 crc kubenswrapper[5016]: I1211 10:36:07.298110 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 11 10:36:07 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld Dec 11 10:36:07 crc kubenswrapper[5016]: [+]process-running ok Dec 11 10:36:07 crc kubenswrapper[5016]: healthz check failed Dec 11 10:36:07 crc kubenswrapper[5016]: I1211 10:36:07.298201 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:36:07 crc kubenswrapper[5016]: I1211 10:36:07.678112 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vb25j" event={"ID":"edb91373-b8a5-4426-9a6b-1fbb6c9f2846","Type":"ContainerStarted","Data":"f2bd2d9b047ad76d526e427054224496aa8abb4a09d50c5b645da64c4c8f3410"} Dec 11 10:36:07 crc kubenswrapper[5016]: I1211 10:36:07.785500 5016 patch_prober.go:28] interesting pod/apiserver-76f77b778f-hbw4j container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Dec 11 10:36:07 crc kubenswrapper[5016]: [+]log ok Dec 11 10:36:07 crc kubenswrapper[5016]: [+]etcd ok Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/start-apiserver-admission-initializer ok Dec 11 10:36:07 crc kubenswrapper[5016]: [-]poststarthook/generic-apiserver-start-informers failed: reason withheld Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/max-in-flight-filter ok Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/storage-object-count-tracker-hook ok Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/image.openshift.io-apiserver-caches ok Dec 11 10:36:07 crc kubenswrapper[5016]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Dec 11 10:36:07 crc kubenswrapper[5016]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/project.openshift.io-projectcache ok Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Dec 11 10:36:07 crc kubenswrapper[5016]: [-]poststarthook/openshift.io-startinformers failed: reason withheld Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/openshift.io-restmapperupdater ok Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Dec 11 10:36:07 crc kubenswrapper[5016]: 
livez check failed
Dec 11 10:36:07 crc kubenswrapper[5016]: I1211 10:36:07.785563 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" podUID="c22ea3a4-ad05-4dde-9cc8-0a0365d225a6" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:07 crc kubenswrapper[5016]: I1211 10:36:07.821343 5016 patch_prober.go:28] interesting pod/apiserver-76f77b778f-hbw4j container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Dec 11 10:36:07 crc kubenswrapper[5016]: [+]log ok
Dec 11 10:36:07 crc kubenswrapper[5016]: [+]etcd ok
Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/start-apiserver-admission-initializer ok
Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/generic-apiserver-start-informers ok
Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/max-in-flight-filter ok
Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/storage-object-count-tracker-hook ok
Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Dec 11 10:36:07 crc kubenswrapper[5016]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Dec 11 10:36:07 crc kubenswrapper[5016]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/project.openshift.io-projectcache ok
Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/openshift.io-startinformers ok
Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/openshift.io-restmapperupdater ok
Dec 11 10:36:07 crc kubenswrapper[5016]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Dec 11 10:36:07 crc kubenswrapper[5016]: livez check failed
Dec 11 10:36:07 crc kubenswrapper[5016]: I1211 10:36:07.821400 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j" podUID="c22ea3a4-ad05-4dde-9cc8-0a0365d225a6" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:08 crc kubenswrapper[5016]: I1211 10:36:08.298744 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:08 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:08 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:08 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:08 crc kubenswrapper[5016]: I1211 10:36:08.299082 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.239577 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.239663 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.240612 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.249422 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.298818 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:09 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:09 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:09 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.298884 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.343745 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.343813 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.348085 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.367634 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.492837 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.519549 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.585608 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.703867 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"68ee6f31-89d2-46b9-8617-646c6b7d6fed","Type":"ContainerStarted","Data":"eb9d6d3b43905ee6983cdb6d11ac9e901f9e7081b35ab896e47fbb9541f580f0"}
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.705892 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcjf" event={"ID":"89fda315-d1f2-484a-aa91-ec75f0b0227e","Type":"ContainerStarted","Data":"8ada3a6b0f9bf46f5715860770521b5f303a501d0e0a332446f52bc00a63d9d4"}
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.708236 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a586cf38-b0a7-48ae-9563-8001393b7540","Type":"ContainerStarted","Data":"8e499ab13b3315328b5eaf04cf94dec6b5b7f38ab1ebbd35522c7e748fc7e5c0"}
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.712265 5016 generic.go:334] "Generic (PLEG): container finished" podID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" containerID="f2bd2d9b047ad76d526e427054224496aa8abb4a09d50c5b645da64c4c8f3410" exitCode=0
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.712326 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vb25j" event={"ID":"edb91373-b8a5-4426-9a6b-1fbb6c9f2846","Type":"ContainerDied","Data":"f2bd2d9b047ad76d526e427054224496aa8abb4a09d50c5b645da64c4c8f3410"}
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.716323 5016 generic.go:334] "Generic (PLEG): container finished" podID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" containerID="5784e5504e28ad0d0917a0bc4f0bd4afcfa36d4c9f3fcc2bdf500f2454f8c84c" exitCode=0
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.716369 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rx8bv" event={"ID":"2f07c0be-3ff2-4b4a-86f1-67da5394f101","Type":"ContainerDied","Data":"5784e5504e28ad0d0917a0bc4f0bd4afcfa36d4c9f3fcc2bdf500f2454f8c84c"}
Dec 11 10:36:09 crc kubenswrapper[5016]: I1211 10:36:09.734676 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=10.734655471 podStartE2EDuration="10.734655471s" podCreationTimestamp="2025-12-11 10:35:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:36:09.734314692 +0000 UTC m=+86.552874261" watchObservedRunningTime="2025-12-11 10:36:09.734655471 +0000 UTC m=+86.553215050"
Dec 11 10:36:10 crc kubenswrapper[5016]: W1211 10:36:10.196393 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-572d59120b033ce9de211049c8945677d380144433c9338c709394ce5edc1200 WatchSource:0}: Error finding container 572d59120b033ce9de211049c8945677d380144433c9338c709394ce5edc1200: Status 404 returned error can't find the container with id 572d59120b033ce9de211049c8945677d380144433c9338c709394ce5edc1200
Dec 11 10:36:10 crc kubenswrapper[5016]: I1211 10:36:10.298326 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:10 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:10 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:10 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:10 crc kubenswrapper[5016]: I1211 10:36:10.298398 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:10 crc kubenswrapper[5016]: I1211 10:36:10.724810 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"572d59120b033ce9de211049c8945677d380144433c9338c709394ce5edc1200"}
Dec 11 10:36:10 crc kubenswrapper[5016]: I1211 10:36:10.726783 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"01efb18513c7657eaaa1b6f0fe46f8e55df7a3bd4cfed03ca9080e10b27b8adf"}
Dec 11 10:36:10 crc kubenswrapper[5016]: I1211 10:36:10.730383 5016 generic.go:334] "Generic (PLEG): container finished" podID="89fda315-d1f2-484a-aa91-ec75f0b0227e" containerID="8ada3a6b0f9bf46f5715860770521b5f303a501d0e0a332446f52bc00a63d9d4" exitCode=0
Dec 11 10:36:10 crc kubenswrapper[5016]: I1211 10:36:10.730477 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcjf" event={"ID":"89fda315-d1f2-484a-aa91-ec75f0b0227e","Type":"ContainerDied","Data":"8ada3a6b0f9bf46f5715860770521b5f303a501d0e0a332446f52bc00a63d9d4"}
Dec 11 10:36:10 crc kubenswrapper[5016]: I1211 10:36:10.739101 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"6738ab26ea5924b4cf196071ece06d9ec3737b9d20504f17d72cec25971ad12b"}
Dec 11 10:36:11 crc kubenswrapper[5016]: I1211 10:36:11.158811 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:36:11 crc kubenswrapper[5016]: I1211 10:36:11.165501 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-hbw4j"
Dec 11 10:36:11 crc kubenswrapper[5016]: I1211 10:36:11.192275 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=10.192255347 podStartE2EDuration="10.192255347s" podCreationTimestamp="2025-12-11 10:36:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:36:10.780978162 +0000 UTC m=+87.599537761" watchObservedRunningTime="2025-12-11 10:36:11.192255347 +0000 UTC m=+88.010814936"
Dec 11 10:36:11 crc kubenswrapper[5016]: I1211 10:36:11.299425 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:11 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:11 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:11 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:11 crc kubenswrapper[5016]: I1211 10:36:11.299605 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:11 crc kubenswrapper[5016]: I1211 10:36:11.750236 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"4761dc1da29434a6d63cacff375d876e973a408ee3faa6e44a3b57731a0104da"}
Dec 11 10:36:11 crc kubenswrapper[5016]: I1211 10:36:11.753778 5016 generic.go:334] "Generic (PLEG): container finished" podID="68ee6f31-89d2-46b9-8617-646c6b7d6fed" containerID="eb9d6d3b43905ee6983cdb6d11ac9e901f9e7081b35ab896e47fbb9541f580f0" exitCode=0
Dec 11 10:36:11 crc kubenswrapper[5016]: I1211 10:36:11.753844 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"68ee6f31-89d2-46b9-8617-646c6b7d6fed","Type":"ContainerDied","Data":"eb9d6d3b43905ee6983cdb6d11ac9e901f9e7081b35ab896e47fbb9541f580f0"}
Dec 11 10:36:11 crc kubenswrapper[5016]: I1211 10:36:11.758317 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"715ba2ec500972d475788bd9dd71159e5d758919577f305ec9a582ff1253a6f8"}
Dec 11 10:36:11 crc kubenswrapper[5016]: I1211 10:36:11.764145 5016 generic.go:334] "Generic (PLEG): container finished" podID="a586cf38-b0a7-48ae-9563-8001393b7540" containerID="8e499ab13b3315328b5eaf04cf94dec6b5b7f38ab1ebbd35522c7e748fc7e5c0" exitCode=0
Dec 11 10:36:11 crc kubenswrapper[5016]: I1211 10:36:11.764226 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a586cf38-b0a7-48ae-9563-8001393b7540","Type":"ContainerDied","Data":"8e499ab13b3315328b5eaf04cf94dec6b5b7f38ab1ebbd35522c7e748fc7e5c0"}
Dec 11 10:36:11 crc kubenswrapper[5016]: I1211 10:36:11.767248 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"be31d0ddac9de604674a1f9c1d86335f26c19cd3be755864a22f17beb9ec05e8"}
Dec 11 10:36:12 crc kubenswrapper[5016]: I1211 10:36:12.285487 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w"
Dec 11 10:36:12 crc kubenswrapper[5016]: I1211 10:36:12.300602 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:12 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:12 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:12 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:12 crc kubenswrapper[5016]: I1211 10:36:12.301198 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:12 crc kubenswrapper[5016]: I1211 10:36:12.777552 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:36:13 crc kubenswrapper[5016]: I1211 10:36:13.306409 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:13 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:13 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:13 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:13 crc kubenswrapper[5016]: I1211 10:36:13.306506 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:14 crc kubenswrapper[5016]: I1211 10:36:14.297186 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:14 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:14 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:14 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:14 crc kubenswrapper[5016]: I1211 10:36:14.297244 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:14 crc kubenswrapper[5016]: E1211 10:36:14.443048 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:36:14 crc kubenswrapper[5016]: E1211 10:36:14.446898 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:36:14 crc kubenswrapper[5016]: E1211 10:36:14.448829 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:36:14 crc kubenswrapper[5016]: E1211 10:36:14.448868 5016 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" podUID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" containerName="kube-multus-additional-cni-plugins"
Dec 11 10:36:15 crc kubenswrapper[5016]: I1211 10:36:15.298009 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:15 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:15 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:15 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:15 crc kubenswrapper[5016]: I1211 10:36:15.298290 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.298119 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:16 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:16 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:16 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.298200 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.380052 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.380123 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.380963 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.381115 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.381199 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-88s4j"
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.382512 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.382610 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.382691 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"d3786864fbe82871e1e8f0659e7fc3d63a316fdd9c493634f348a4ba731619f8"} pod="openshift-console/downloads-7954f5f757-88s4j" containerMessage="Container download-server failed liveness probe, will be restarted"
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.383517 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" containerID="cri-o://d3786864fbe82871e1e8f0659e7fc3d63a316fdd9c493634f348a4ba731619f8" gracePeriod=2
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.562876 5016 patch_prober.go:28] interesting pod/console-f9d7485db-jpxgn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body=
Dec 11 10:36:16 crc kubenswrapper[5016]: I1211 10:36:16.562959 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-jpxgn" podUID="fa3166f9-577e-4994-9290-7ced66d69dcc" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused"
Dec 11 10:36:17 crc kubenswrapper[5016]: I1211 10:36:17.300745 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:17 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:17 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:17 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:17 crc kubenswrapper[5016]: I1211 10:36:17.301360 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:17 crc kubenswrapper[5016]: I1211 10:36:17.866081 5016 generic.go:334] "Generic (PLEG): container finished" podID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerID="d3786864fbe82871e1e8f0659e7fc3d63a316fdd9c493634f348a4ba731619f8" exitCode=0
Dec 11 10:36:17 crc kubenswrapper[5016]: I1211 10:36:17.866126 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-88s4j" event={"ID":"ed376fff-5d17-48b1-b48c-ec0c3548dde4","Type":"ContainerDied","Data":"d3786864fbe82871e1e8f0659e7fc3d63a316fdd9c493634f348a4ba731619f8"}
Dec 11 10:36:18 crc kubenswrapper[5016]: I1211 10:36:18.297990 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:18 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:18 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:18 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:18 crc kubenswrapper[5016]: I1211 10:36:18.298050 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:19 crc kubenswrapper[5016]: I1211 10:36:19.299627 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:19 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:19 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:19 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:19 crc kubenswrapper[5016]: I1211 10:36:19.299729 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:19 crc kubenswrapper[5016]: I1211 10:36:19.887001 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-tfj94_ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9/kube-multus-additional-cni-plugins/0.log"
Dec 11 10:36:19 crc kubenswrapper[5016]: I1211 10:36:19.887070 5016 generic.go:334] "Generic (PLEG): container finished" podID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" exitCode=137
Dec 11 10:36:19 crc kubenswrapper[5016]: I1211 10:36:19.887113 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" event={"ID":"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9","Type":"ContainerDied","Data":"42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c"}
Dec 11 10:36:20 crc kubenswrapper[5016]: I1211 10:36:20.297683 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:20 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:20 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:20 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:20 crc kubenswrapper[5016]: I1211 10:36:20.297767 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:21 crc kubenswrapper[5016]: I1211 10:36:21.299163 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:21 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:21 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:21 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:21 crc kubenswrapper[5016]: I1211 10:36:21.299491 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:21 crc kubenswrapper[5016]: I1211 10:36:21.872220 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 11 10:36:21 crc kubenswrapper[5016]: I1211 10:36:21.881697 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Dec 11 10:36:21 crc kubenswrapper[5016]: I1211 10:36:21.901712 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"68ee6f31-89d2-46b9-8617-646c6b7d6fed","Type":"ContainerDied","Data":"da5ca759f73e0fffa68cce9a1d470dc3b9a2d4dea45ae31c6de65bde47fd60bf"}
Dec 11 10:36:21 crc kubenswrapper[5016]: I1211 10:36:21.902338 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da5ca759f73e0fffa68cce9a1d470dc3b9a2d4dea45ae31c6de65bde47fd60bf"
Dec 11 10:36:21 crc kubenswrapper[5016]: I1211 10:36:21.901768 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Dec 11 10:36:21 crc kubenswrapper[5016]: I1211 10:36:21.904911 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a586cf38-b0a7-48ae-9563-8001393b7540","Type":"ContainerDied","Data":"d6d05dfb7c6ec18a4cc8fb67be773a5f63bcc1878797417b9028cb7c44990fca"}
Dec 11 10:36:21 crc kubenswrapper[5016]: I1211 10:36:21.904974 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d6d05dfb7c6ec18a4cc8fb67be773a5f63bcc1878797417b9028cb7c44990fca"
Dec 11 10:36:21 crc kubenswrapper[5016]: I1211 10:36:21.905059 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.005122 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a586cf38-b0a7-48ae-9563-8001393b7540-kubelet-dir\") pod \"a586cf38-b0a7-48ae-9563-8001393b7540\" (UID: \"a586cf38-b0a7-48ae-9563-8001393b7540\") "
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.005183 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/68ee6f31-89d2-46b9-8617-646c6b7d6fed-kubelet-dir\") pod \"68ee6f31-89d2-46b9-8617-646c6b7d6fed\" (UID: \"68ee6f31-89d2-46b9-8617-646c6b7d6fed\") "
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.005280 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a586cf38-b0a7-48ae-9563-8001393b7540-kube-api-access\") pod \"a586cf38-b0a7-48ae-9563-8001393b7540\" (UID: \"a586cf38-b0a7-48ae-9563-8001393b7540\") "
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.005271 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a586cf38-b0a7-48ae-9563-8001393b7540-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a586cf38-b0a7-48ae-9563-8001393b7540" (UID: "a586cf38-b0a7-48ae-9563-8001393b7540"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.005361 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/68ee6f31-89d2-46b9-8617-646c6b7d6fed-kube-api-access\") pod \"68ee6f31-89d2-46b9-8617-646c6b7d6fed\" (UID: \"68ee6f31-89d2-46b9-8617-646c6b7d6fed\") "
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.005375 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68ee6f31-89d2-46b9-8617-646c6b7d6fed-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "68ee6f31-89d2-46b9-8617-646c6b7d6fed" (UID: "68ee6f31-89d2-46b9-8617-646c6b7d6fed"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.005625 5016 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a586cf38-b0a7-48ae-9563-8001393b7540-kubelet-dir\") on node \"crc\" DevicePath \"\""
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.005640 5016 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/68ee6f31-89d2-46b9-8617-646c6b7d6fed-kubelet-dir\") on node \"crc\" DevicePath \"\""
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.011122 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a586cf38-b0a7-48ae-9563-8001393b7540-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a586cf38-b0a7-48ae-9563-8001393b7540" (UID: "a586cf38-b0a7-48ae-9563-8001393b7540"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.011165 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68ee6f31-89d2-46b9-8617-646c6b7d6fed-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "68ee6f31-89d2-46b9-8617-646c6b7d6fed" (UID: "68ee6f31-89d2-46b9-8617-646c6b7d6fed"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.106656 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a586cf38-b0a7-48ae-9563-8001393b7540-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.106714 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/68ee6f31-89d2-46b9-8617-646c6b7d6fed-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.301198 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:22 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:22 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:22 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:22 crc kubenswrapper[5016]: I1211 10:36:22.301547 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:23 crc kubenswrapper[5016]: I1211 10:36:23.299443 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:23 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:23 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:23 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:23 crc kubenswrapper[5016]: I1211 10:36:23.299551 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:24 crc kubenswrapper[5016]: I1211 10:36:24.297676 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:24 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:24 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:24 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:24 crc kubenswrapper[5016]: I1211 10:36:24.297761 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:24 crc kubenswrapper[5016]: E1211 10:36:24.441708 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c is running failed: container process not found" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:36:24 crc kubenswrapper[5016]: E1211 10:36:24.443282 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c is running failed: container process not found" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:36:24 crc kubenswrapper[5016]: E1211 10:36:24.444145 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c is running failed: container process not found" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:36:24 crc kubenswrapper[5016]: E1211 10:36:24.444188 5016 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c is running failed: container process not found" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" podUID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" containerName="kube-multus-additional-cni-plugins"
Dec 11 10:36:25 crc kubenswrapper[5016]: I1211 10:36:25.299174 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:25 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:25 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:25 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:25 crc kubenswrapper[5016]: I1211 10:36:25.299549 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:26 crc kubenswrapper[5016]: I1211 10:36:26.092779 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9dqtm"
Dec 11 10:36:26 crc kubenswrapper[5016]: I1211 10:36:26.298590 5016 patch_prober.go:28] interesting pod/router-default-5444994796-8f46b container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 11 10:36:26 crc kubenswrapper[5016]: [-]has-synced failed: reason withheld
Dec 11 10:36:26 crc kubenswrapper[5016]: [+]process-running ok
Dec 11 10:36:26 crc kubenswrapper[5016]: healthz check failed
Dec 11 10:36:26 crc kubenswrapper[5016]: I1211 10:36:26.298690 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8f46b" podUID="4a914a7e-cc73-4d59-a122-e58d5f2da33b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 11 10:36:26 crc kubenswrapper[5016]: I1211 10:36:26.379519 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:36:26 crc kubenswrapper[5016]: I1211 10:36:26.379577 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 11 10:36:26 crc kubenswrapper[5016]: I1211 10:36:26.563278 5016 patch_prober.go:28] interesting pod/console-f9d7485db-jpxgn container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body=
Dec 11 10:36:26 crc kubenswrapper[5016]: I1211 10:36:26.563365 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-jpxgn" podUID="fa3166f9-577e-4994-9290-7ced66d69dcc" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused"
Dec 11 10:36:27 crc kubenswrapper[5016]: I1211 10:36:27.299073 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-8f46b"
Dec 11 10:36:27 crc kubenswrapper[5016]: I1211 10:36:27.301758 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-8f46b"
Dec 11 10:36:34 crc kubenswrapper[5016]: E1211 10:36:34.443129 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c is running failed: container process not found" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:36:34 crc kubenswrapper[5016]: E1211 10:36:34.444606 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c is running failed: container process not found" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:36:34 crc kubenswrapper[5016]: E1211 10:36:34.445177 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c is running failed: container process not found" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:36:34 crc kubenswrapper[5016]: E1211 10:36:34.445226 5016 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c is running failed: container process not found" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" podUID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" containerName="kube-multus-additional-cni-plugins"
Dec 11 10:36:36 crc kubenswrapper[5016]: I1211 10:36:36.379542 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:36:36 crc kubenswrapper[5016]: I1211 10:36:36.379910 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.033277 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Dec 11 10:36:38 crc kubenswrapper[5016]: E1211 10:36:38.033585 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a586cf38-b0a7-48ae-9563-8001393b7540" containerName="pruner"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.033602 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="a586cf38-b0a7-48ae-9563-8001393b7540" containerName="pruner"
Dec 11 10:36:38 crc kubenswrapper[5016]: E1211 10:36:38.033616 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68ee6f31-89d2-46b9-8617-646c6b7d6fed" containerName="pruner"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.033624 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="68ee6f31-89d2-46b9-8617-646c6b7d6fed" containerName="pruner"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.033723 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="68ee6f31-89d2-46b9-8617-646c6b7d6fed" containerName="pruner"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.033738 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="a586cf38-b0a7-48ae-9563-8001393b7540" containerName="pruner"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.034120 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.036979 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.037193 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.043976 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.140593 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/28cc663b-e04d-4eb9-9b34-4fb847ca59ef-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"28cc663b-e04d-4eb9-9b34-4fb847ca59ef\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.140690 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/28cc663b-e04d-4eb9-9b34-4fb847ca59ef-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"28cc663b-e04d-4eb9-9b34-4fb847ca59ef\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.242512 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/28cc663b-e04d-4eb9-9b34-4fb847ca59ef-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"28cc663b-e04d-4eb9-9b34-4fb847ca59ef\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.242592 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/28cc663b-e04d-4eb9-9b34-4fb847ca59ef-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"28cc663b-e04d-4eb9-9b34-4fb847ca59ef\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.242711 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/28cc663b-e04d-4eb9-9b34-4fb847ca59ef-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"28cc663b-e04d-4eb9-9b34-4fb847ca59ef\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.263490 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/28cc663b-e04d-4eb9-9b34-4fb847ca59ef-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"28cc663b-e04d-4eb9-9b34-4fb847ca59ef\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 11 10:36:38 crc kubenswrapper[5016]: I1211 10:36:38.353108 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 11 10:36:39 crc kubenswrapper[5016]: I1211 10:36:39.951462 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-jpxgn"
Dec 11 10:36:39 crc kubenswrapper[5016]: I1211 10:36:39.955670 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-jpxgn"
Dec 11 10:36:42 crc kubenswrapper[5016]: I1211 10:36:42.831467 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Dec 11 10:36:42 crc kubenswrapper[5016]: I1211 10:36:42.832205 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Dec 11 10:36:42 crc kubenswrapper[5016]: I1211 10:36:42.840880 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Dec 11 10:36:42 crc kubenswrapper[5016]: I1211 10:36:42.904012 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eda32bb5-f0ba-418d-936d-8f3909f5d759-kubelet-dir\") pod \"installer-9-crc\" (UID: \"eda32bb5-f0ba-418d-936d-8f3909f5d759\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 11 10:36:42 crc kubenswrapper[5016]: I1211 10:36:42.904071 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/eda32bb5-f0ba-418d-936d-8f3909f5d759-var-lock\") pod \"installer-9-crc\" (UID: \"eda32bb5-f0ba-418d-936d-8f3909f5d759\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 11 10:36:42 crc kubenswrapper[5016]: I1211 10:36:42.904123 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eda32bb5-f0ba-418d-936d-8f3909f5d759-kube-api-access\") pod \"installer-9-crc\" (UID: \"eda32bb5-f0ba-418d-936d-8f3909f5d759\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 11 10:36:43 crc kubenswrapper[5016]: I1211 10:36:43.005815 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eda32bb5-f0ba-418d-936d-8f3909f5d759-kube-api-access\") pod \"installer-9-crc\" (UID: \"eda32bb5-f0ba-418d-936d-8f3909f5d759\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 11 10:36:43 crc kubenswrapper[5016]: I1211 10:36:43.005923 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eda32bb5-f0ba-418d-936d-8f3909f5d759-kubelet-dir\") pod \"installer-9-crc\" (UID: \"eda32bb5-f0ba-418d-936d-8f3909f5d759\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 11 10:36:43 crc kubenswrapper[5016]: I1211 10:36:43.005980 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/eda32bb5-f0ba-418d-936d-8f3909f5d759-var-lock\") pod \"installer-9-crc\" (UID: \"eda32bb5-f0ba-418d-936d-8f3909f5d759\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 11 10:36:43 crc kubenswrapper[5016]: I1211 10:36:43.006097 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/eda32bb5-f0ba-418d-936d-8f3909f5d759-var-lock\") pod \"installer-9-crc\" (UID: \"eda32bb5-f0ba-418d-936d-8f3909f5d759\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 11 10:36:43 crc kubenswrapper[5016]: I1211 10:36:43.006499 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eda32bb5-f0ba-418d-936d-8f3909f5d759-kubelet-dir\") pod \"installer-9-crc\" (UID: \"eda32bb5-f0ba-418d-936d-8f3909f5d759\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 11 10:36:43 crc kubenswrapper[5016]: I1211 10:36:43.027951 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eda32bb5-f0ba-418d-936d-8f3909f5d759-kube-api-access\") pod \"installer-9-crc\" (UID: \"eda32bb5-f0ba-418d-936d-8f3909f5d759\") " pod="openshift-kube-apiserver/installer-9-crc"
Dec 11 10:36:43 crc kubenswrapper[5016]: I1211 10:36:43.159107 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Dec 11 10:36:44 crc kubenswrapper[5016]: E1211 10:36:44.442083 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c is running failed: container process not found" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:36:44 crc kubenswrapper[5016]: E1211 10:36:44.442824 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c is running failed: container process not found" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:36:44 crc kubenswrapper[5016]: E1211 10:36:44.443443 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c is running failed: container process not found" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c" cmd=["/bin/bash","-c","test -f /ready/ready"]
Dec 11 10:36:44 crc kubenswrapper[5016]: E1211 10:36:44.443530 5016 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c is running failed: container process not found" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" podUID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" containerName="kube-multus-additional-cni-plugins"
Dec 11 10:36:46 crc kubenswrapper[5016]: I1211 10:36:46.380959 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:36:46 crc kubenswrapper[5016]: I1211 10:36:46.381345 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 11 10:36:50 crc kubenswrapper[5016]: I1211 10:36:50.183009 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.124382 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-tfj94_ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9/kube-multus-additional-cni-plugins/0.log"
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.124646 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94"
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.205066 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-tfj94_ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9/kube-multus-additional-cni-plugins/0.log"
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.205132 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94" event={"ID":"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9","Type":"ContainerDied","Data":"a5bee6a25f73ff086fd99099925587b483b187bd396f811a6a79d5f09729b5b2"}
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.205184 5016 scope.go:117] "RemoveContainer" containerID="42e1fd0b0bf91f690e07ae1608a12f481391dfe43a31eeca3bcb02ef552bb73c"
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.205293 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-tfj94"
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.230748 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-ready\") pod \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") "
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.230826 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-tuning-conf-dir\") pod \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") "
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.230887 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-cni-sysctl-allowlist\") pod \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") "
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.230916 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxhwt\" (UniqueName: \"kubernetes.io/projected/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-kube-api-access-rxhwt\") pod \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\" (UID: \"ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9\") "
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.231848 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-tuning-conf-dir" (OuterVolumeSpecName: "tuning-conf-dir") pod "ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" (UID: "ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9"). InnerVolumeSpecName "tuning-conf-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.232123 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-ready" (OuterVolumeSpecName: "ready") pod "ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" (UID: "ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9"). InnerVolumeSpecName "ready". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.232413 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" (UID: "ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.238818 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-kube-api-access-rxhwt" (OuterVolumeSpecName: "kube-api-access-rxhwt") pod "ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" (UID: "ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9"). InnerVolumeSpecName "kube-api-access-rxhwt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.332521 5016 reconciler_common.go:293] "Volume detached for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-ready\") on node \"crc\" DevicePath \"\""
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.332581 5016 reconciler_common.go:293] "Volume detached for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-tuning-conf-dir\") on node \"crc\" DevicePath \"\""
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.332606 5016 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\""
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.332626 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxhwt\" (UniqueName: \"kubernetes.io/projected/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9-kube-api-access-rxhwt\") on node \"crc\" DevicePath \"\""
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.535774 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-tfj94"]
Dec 11 10:36:54 crc kubenswrapper[5016]: I1211 10:36:54.538568 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-tfj94"]
Dec 11 10:36:55 crc kubenswrapper[5016]: I1211 10:36:55.483723 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" path="/var/lib/kubelet/pods/ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9/volumes"
Dec 11 10:36:56 crc kubenswrapper[5016]: I1211 10:36:56.380139 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:36:56 crc kubenswrapper[5016]: I1211 10:36:56.380722 5016 prober.go:107] "Probe failed" probeType="Readiness"
pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:36:56 crc kubenswrapper[5016]: E1211 10:36:56.392855 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e\": context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 11 10:36:56 crc kubenswrapper[5016]: E1211 10:36:56.393089 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n4jhp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-rx8bv_openshift-marketplace(2f07c0be-3ff2-4b4a-86f1-67da5394f101): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e\": context canceled" logger="UnhandledError" Dec 11 10:36:56 crc kubenswrapper[5016]: E1211 10:36:56.394266 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e: Get \\\"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e\\\": context canceled\"" pod="openshift-marketplace/redhat-operators-rx8bv" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" Dec 11 10:36:56 crc 
kubenswrapper[5016]: E1211 10:36:56.511976 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage2886471681/2\": happened during read: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Dec 11 10:36:56 crc kubenswrapper[5016]: E1211 10:36:56.512129 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nwsjx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-vb25j_openshift-marketplace(edb91373-b8a5-4426-9a6b-1fbb6c9f2846): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage2886471681/2\": happened during read: context canceled" logger="UnhandledError" Dec 11 10:36:56 crc kubenswrapper[5016]: E1211 10:36:56.513355 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \\\"/var/tmp/container_images_storage2886471681/2\\\": happened during read: context canceled\"" pod="openshift-marketplace/redhat-marketplace-vb25j" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" Dec 11 10:36:57 crc kubenswrapper[5016]: E1211 10:36:57.133434 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Dec 11 10:36:57 crc kubenswrapper[5016]: E1211 10:36:57.134263 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gc7ml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-hqmxw_openshift-marketplace(f393088a-dacc-4673-8074-d6be25842a84): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 11 10:36:57 crc kubenswrapper[5016]: E1211 10:36:57.135615 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-hqmxw" podUID="f393088a-dacc-4673-8074-d6be25842a84" Dec 11 10:37:00 crc kubenswrapper[5016]: E1211 10:37:00.939428 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-vb25j" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" Dec 11 10:37:00 crc kubenswrapper[5016]: E1211 10:37:00.939488 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-rx8bv" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" Dec 11 10:37:00 crc kubenswrapper[5016]: E1211 10:37:00.939602 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-hqmxw" podUID="f393088a-dacc-4673-8074-d6be25842a84" Dec 11 10:37:00 crc kubenswrapper[5016]: E1211 10:37:00.939717 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e: Get 
\"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e\": context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 11 10:37:00 crc kubenswrapper[5016]: E1211 10:37:00.940082 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xqbf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-rzcjf_openshift-marketplace(89fda315-d1f2-484a-aa91-ec75f0b0227e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e\": context canceled" logger="UnhandledError" Dec 11 10:37:00 crc kubenswrapper[5016]: E1211 10:37:00.943176 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e: Get \\\"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:e9bc35478da4e272fcc5e4573ebac9535075e1f2d8c613b985ef6e3a3c0c813e\\\": context canceled\"" pod="openshift-marketplace/redhat-operators-rzcjf" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" Dec 11 10:37:03 crc kubenswrapper[5016]: E1211 10:37:03.588401 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-rzcjf" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" Dec 11 10:37:03 crc kubenswrapper[5016]: E1211 10:37:03.833839 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from 
manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 11 10:37:03 crc kubenswrapper[5016]: E1211 10:37:03.834042 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tm8r2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-tp5lv_openshift-marketplace(7242e8c3-6ed6-4613-8fc9-1339be494e56): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 11 10:37:03 crc kubenswrapper[5016]: E1211 10:37:03.835413 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-tp5lv" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" Dec 11 10:37:04 crc kubenswrapper[5016]: E1211 10:37:04.541996 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 11 10:37:04 crc kubenswrapper[5016]: E1211 10:37:04.542668 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7m5zn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-fc9qn_openshift-marketplace(8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 11 10:37:04 crc kubenswrapper[5016]: E1211 10:37:04.546015 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-fc9qn" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" Dec 11 10:37:05 crc kubenswrapper[5016]: E1211 10:37:05.929355 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-fc9qn" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" Dec 11 10:37:05 crc kubenswrapper[5016]: E1211 10:37:05.929540 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-tp5lv" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" Dec 11 10:37:06 crc kubenswrapper[5016]: E1211 10:37:06.040576 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 11 10:37:06 crc kubenswrapper[5016]: E1211 10:37:06.041138 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g6n2x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-c6sdb_openshift-marketplace(9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 11 10:37:06 crc kubenswrapper[5016]: E1211 10:37:06.042302 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-c6sdb" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" Dec 11 10:37:06 crc kubenswrapper[5016]: E1211 10:37:06.120281 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 11 10:37:06 crc kubenswrapper[5016]: E1211 10:37:06.120454 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ctk8w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-r5rgf_openshift-marketplace(623ddc04-83e2-42ac-bcac-59b72d2fac2a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 11 10:37:06 crc kubenswrapper[5016]: E1211 10:37:06.122050 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-r5rgf" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" Dec 11 10:37:06 crc kubenswrapper[5016]: I1211 10:37:06.288307 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-88s4j" event={"ID":"ed376fff-5d17-48b1-b48c-ec0c3548dde4","Type":"ContainerStarted","Data":"d53b71515c5a2d8d9c4f7d6cf136499816778a86d82e5e0da9b8ccf358d09940"} Dec 11 10:37:06 crc kubenswrapper[5016]: I1211 10:37:06.289910 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-88s4j" Dec 11 10:37:06 crc kubenswrapper[5016]: I1211 10:37:06.290011 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:37:06 crc kubenswrapper[5016]: I1211 10:37:06.290129 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:37:06 crc kubenswrapper[5016]: I1211 10:37:06.294063 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 11 10:37:06 crc kubenswrapper[5016]: E1211 10:37:06.296168 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: 
\"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-r5rgf" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" Dec 11 10:37:06 crc kubenswrapper[5016]: E1211 10:37:06.301261 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-c6sdb" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" Dec 11 10:37:06 crc kubenswrapper[5016]: I1211 10:37:06.383117 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:37:06 crc kubenswrapper[5016]: I1211 10:37:06.383169 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:37:06 crc kubenswrapper[5016]: I1211 10:37:06.383192 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:37:06 crc kubenswrapper[5016]: I1211 10:37:06.383228 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:37:06 crc kubenswrapper[5016]: I1211 10:37:06.461704 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 11 10:37:07 crc kubenswrapper[5016]: I1211 10:37:07.306709 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"28cc663b-e04d-4eb9-9b34-4fb847ca59ef","Type":"ContainerStarted","Data":"d31a13aedc8fdb11c8c2787dd8ac085f7f5a6849db23391cd383ade2e1f9a561"} Dec 11 10:37:07 crc kubenswrapper[5016]: I1211 10:37:07.307146 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"28cc663b-e04d-4eb9-9b34-4fb847ca59ef","Type":"ContainerStarted","Data":"1bf1eabdc8ad7d4e67dd88c1c6f5c7b112a7877696dfec9e1016bc3cd70cefe6"} Dec 11 10:37:07 crc kubenswrapper[5016]: I1211 10:37:07.311921 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"eda32bb5-f0ba-418d-936d-8f3909f5d759","Type":"ContainerStarted","Data":"1d01538fa0ab9834278dd1774ce732ada980ccebb08331a714bd64ffe87f8e92"} Dec 11 10:37:07 crc kubenswrapper[5016]: I1211 10:37:07.312026 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"eda32bb5-f0ba-418d-936d-8f3909f5d759","Type":"ContainerStarted","Data":"25d3ac9d6c110c3a4d6abc6a28da18d39912c24f2e9d1e72a3280ccb76144adf"} Dec 11 10:37:07 crc kubenswrapper[5016]: I1211 10:37:07.312577 5016 patch_prober.go:28] interesting 
pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:37:07 crc kubenswrapper[5016]: I1211 10:37:07.312689 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:37:07 crc kubenswrapper[5016]: I1211 10:37:07.326032 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=29.325997525 podStartE2EDuration="29.325997525s" podCreationTimestamp="2025-12-11 10:36:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:37:07.325115502 +0000 UTC m=+144.143675101" watchObservedRunningTime="2025-12-11 10:37:07.325997525 +0000 UTC m=+144.144557114" Dec 11 10:37:07 crc kubenswrapper[5016]: I1211 10:37:07.346781 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=25.346761017 podStartE2EDuration="25.346761017s" podCreationTimestamp="2025-12-11 10:36:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:37:07.345908824 +0000 UTC m=+144.164468433" watchObservedRunningTime="2025-12-11 10:37:07.346761017 +0000 UTC m=+144.165320606" Dec 11 10:37:08 crc kubenswrapper[5016]: I1211 10:37:08.320558 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:37:08 crc kubenswrapper[5016]: I1211 10:37:08.320953 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:37:09 crc kubenswrapper[5016]: I1211 10:37:09.330701 5016 generic.go:334] "Generic (PLEG): container finished" podID="28cc663b-e04d-4eb9-9b34-4fb847ca59ef" containerID="d31a13aedc8fdb11c8c2787dd8ac085f7f5a6849db23391cd383ade2e1f9a561" exitCode=0 Dec 11 10:37:09 crc kubenswrapper[5016]: I1211 10:37:09.330758 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"28cc663b-e04d-4eb9-9b34-4fb847ca59ef","Type":"ContainerDied","Data":"d31a13aedc8fdb11c8c2787dd8ac085f7f5a6849db23391cd383ade2e1f9a561"} Dec 11 10:37:10 crc kubenswrapper[5016]: I1211 10:37:10.671291 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 11 10:37:10 crc kubenswrapper[5016]: I1211 10:37:10.803747 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/28cc663b-e04d-4eb9-9b34-4fb847ca59ef-kube-api-access\") pod \"28cc663b-e04d-4eb9-9b34-4fb847ca59ef\" (UID: \"28cc663b-e04d-4eb9-9b34-4fb847ca59ef\") " Dec 11 10:37:10 crc kubenswrapper[5016]: I1211 10:37:10.803865 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/28cc663b-e04d-4eb9-9b34-4fb847ca59ef-kubelet-dir\") pod \"28cc663b-e04d-4eb9-9b34-4fb847ca59ef\" (UID: \"28cc663b-e04d-4eb9-9b34-4fb847ca59ef\") " Dec 11 10:37:10 crc kubenswrapper[5016]: I1211 10:37:10.804020 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/28cc663b-e04d-4eb9-9b34-4fb847ca59ef-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "28cc663b-e04d-4eb9-9b34-4fb847ca59ef" (UID: "28cc663b-e04d-4eb9-9b34-4fb847ca59ef"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:37:10 crc kubenswrapper[5016]: I1211 10:37:10.804203 5016 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/28cc663b-e04d-4eb9-9b34-4fb847ca59ef-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 11 10:37:10 crc kubenswrapper[5016]: I1211 10:37:10.809083 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28cc663b-e04d-4eb9-9b34-4fb847ca59ef-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "28cc663b-e04d-4eb9-9b34-4fb847ca59ef" (UID: "28cc663b-e04d-4eb9-9b34-4fb847ca59ef"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:37:10 crc kubenswrapper[5016]: I1211 10:37:10.905664 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/28cc663b-e04d-4eb9-9b34-4fb847ca59ef-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 11 10:37:11 crc kubenswrapper[5016]: I1211 10:37:11.343207 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"28cc663b-e04d-4eb9-9b34-4fb847ca59ef","Type":"ContainerDied","Data":"1bf1eabdc8ad7d4e67dd88c1c6f5c7b112a7877696dfec9e1016bc3cd70cefe6"} Dec 11 10:37:11 crc kubenswrapper[5016]: I1211 10:37:11.343612 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1bf1eabdc8ad7d4e67dd88c1c6f5c7b112a7877696dfec9e1016bc3cd70cefe6" Dec 11 10:37:11 crc kubenswrapper[5016]: I1211 10:37:11.343575 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 11 10:37:11 crc kubenswrapper[5016]: I1211 10:37:11.934650 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4k8l5"] Dec 11 10:37:16 crc kubenswrapper[5016]: I1211 10:37:16.379782 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:37:16 crc kubenswrapper[5016]: I1211 10:37:16.380859 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:37:16 crc kubenswrapper[5016]: I1211 10:37:16.380521 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:37:16 crc kubenswrapper[5016]: I1211 10:37:16.381246 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:37:26 crc kubenswrapper[5016]: I1211 10:37:26.379559 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:37:26 crc kubenswrapper[5016]: I1211 10:37:26.380143 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:37:26 crc kubenswrapper[5016]: I1211 10:37:26.380199 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-88s4j" Dec 11 10:37:26 crc kubenswrapper[5016]: I1211 10:37:26.380789 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"d53b71515c5a2d8d9c4f7d6cf136499816778a86d82e5e0da9b8ccf358d09940"} pod="openshift-console/downloads-7954f5f757-88s4j" containerMessage="Container download-server failed liveness probe, will be restarted" Dec 11 10:37:26 crc kubenswrapper[5016]: I1211 10:37:26.380823 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" containerID="cri-o://d53b71515c5a2d8d9c4f7d6cf136499816778a86d82e5e0da9b8ccf358d09940" gracePeriod=2 Dec 11 10:37:26 crc kubenswrapper[5016]: I1211 10:37:26.379596 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get 
\"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:37:26 crc kubenswrapper[5016]: I1211 10:37:26.380959 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:37:26 crc kubenswrapper[5016]: I1211 10:37:26.381264 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:37:26 crc kubenswrapper[5016]: I1211 10:37:26.381323 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:37:32 crc kubenswrapper[5016]: I1211 10:37:32.495005 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_downloads-7954f5f757-88s4j_ed376fff-5d17-48b1-b48c-ec0c3548dde4/download-server/1.log" Dec 11 10:37:32 crc kubenswrapper[5016]: I1211 10:37:32.497563 5016 generic.go:334] "Generic (PLEG): container finished" podID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerID="d53b71515c5a2d8d9c4f7d6cf136499816778a86d82e5e0da9b8ccf358d09940" exitCode=137 Dec 11 10:37:32 crc kubenswrapper[5016]: I1211 10:37:32.497653 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-88s4j" event={"ID":"ed376fff-5d17-48b1-b48c-ec0c3548dde4","Type":"ContainerDied","Data":"d53b71515c5a2d8d9c4f7d6cf136499816778a86d82e5e0da9b8ccf358d09940"} Dec 11 10:37:32 crc kubenswrapper[5016]: I1211 10:37:32.497886 5016 scope.go:117] "RemoveContainer" containerID="d3786864fbe82871e1e8f0659e7fc3d63a316fdd9c493634f348a4ba731619f8" Dec 11 10:37:36 crc kubenswrapper[5016]: I1211 10:37:36.379648 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:37:36 crc kubenswrapper[5016]: I1211 10:37:36.379738 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:37:36 crc kubenswrapper[5016]: I1211 10:37:36.991317 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" containerName="oauth-openshift" containerID="cri-o://af4cdd2cafc9fea690f4c35a1e0f4de9d56d8a8d9d4a0c7b8889f7d8c20723fc" gracePeriod=15 Dec 11 10:37:40 crc kubenswrapper[5016]: I1211 10:37:40.559611 5016 generic.go:334] "Generic (PLEG): container finished" podID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" containerID="af4cdd2cafc9fea690f4c35a1e0f4de9d56d8a8d9d4a0c7b8889f7d8c20723fc" exitCode=0 Dec 11 10:37:40 crc kubenswrapper[5016]: I1211 10:37:40.559727 
5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" event={"ID":"a91554fe-759f-4f9a-9d88-7b4d8650a08b","Type":"ContainerDied","Data":"af4cdd2cafc9fea690f4c35a1e0f4de9d56d8a8d9d4a0c7b8889f7d8c20723fc"}
Dec 11 10:37:42 crc kubenswrapper[5016]: I1211 10:37:42.944265 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:37:42 crc kubenswrapper[5016]: I1211 10:37:42.944905 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.593143 5016 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Dec 11 10:37:44 crc kubenswrapper[5016]: E1211 10:37:44.593495 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" containerName="kube-multus-additional-cni-plugins"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.593512 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" containerName="kube-multus-additional-cni-plugins"
Dec 11 10:37:44 crc kubenswrapper[5016]: E1211 10:37:44.593533 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28cc663b-e04d-4eb9-9b34-4fb847ca59ef" containerName="pruner"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.593543 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="28cc663b-e04d-4eb9-9b34-4fb847ca59ef" containerName="pruner"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.597559 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="28cc663b-e04d-4eb9-9b34-4fb847ca59ef" containerName="pruner"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.597651 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffa146b6-e6b1-4715-bd2e-7c2c09cc8bf9" containerName="kube-multus-additional-cni-plugins"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.598386 5016 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.599016 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498" gracePeriod=15
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.599265 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.599249 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://31d86d5f7654802bd05a4a9c4d506deb5b9bcf5fd1ae02722093170105a93409" gracePeriod=15
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.599717 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4" gracePeriod=15
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.599791 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166" gracePeriod=15
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.599750 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43" gracePeriod=15
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.601231 5016 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Dec 11 10:37:44 crc kubenswrapper[5016]: E1211 10:37:44.601719 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.601735 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Dec 11 10:37:44 crc kubenswrapper[5016]: E1211 10:37:44.601768 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.601778 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Dec 11 10:37:44 crc kubenswrapper[5016]: E1211 10:37:44.601795 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.601806 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Dec 11 10:37:44 crc kubenswrapper[5016]: E1211 10:37:44.601823 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.601832 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Dec 11 10:37:44 crc kubenswrapper[5016]: E1211 10:37:44.601848 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.601856 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Dec 11 10:37:44 crc kubenswrapper[5016]: E1211 10:37:44.601867 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.601876 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Dec 11 10:37:44 crc kubenswrapper[5016]: E1211 10:37:44.601891 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.601899 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.602202 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.602225 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.602243 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.602258 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.602268 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.602285 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.643310 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.729201 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.729271 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.729298 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.729315 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.729646 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.732310 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.732385 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.732453 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834399 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834485 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834548 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834582 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834617 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834607 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834640 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834684 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834706 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834744 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834763 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834775 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834845 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834894 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.834944 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.835069 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:37:44 crc kubenswrapper[5016]: I1211 10:37:44.937717 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:37:45 crc kubenswrapper[5016]: I1211 10:37:45.599476 5016 generic.go:334] "Generic (PLEG): container finished" podID="eda32bb5-f0ba-418d-936d-8f3909f5d759" containerID="1d01538fa0ab9834278dd1774ce732ada980ccebb08331a714bd64ffe87f8e92" exitCode=0
Dec 11 10:37:45 crc kubenswrapper[5016]: I1211 10:37:45.599573 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"eda32bb5-f0ba-418d-936d-8f3909f5d759","Type":"ContainerDied","Data":"1d01538fa0ab9834278dd1774ce732ada980ccebb08331a714bd64ffe87f8e92"}
Dec 11 10:37:45 crc kubenswrapper[5016]: I1211 10:37:45.600622 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:45 crc kubenswrapper[5016]: I1211 10:37:45.601202 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:45 crc kubenswrapper[5016]: I1211 10:37:45.602531 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Dec 11 10:37:45 crc kubenswrapper[5016]: I1211 10:37:45.604464 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Dec 11 10:37:45 crc kubenswrapper[5016]: I1211 10:37:45.605186 5016 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="31d86d5f7654802bd05a4a9c4d506deb5b9bcf5fd1ae02722093170105a93409" exitCode=0
Dec 11 10:37:45 crc kubenswrapper[5016]: I1211 10:37:45.605208 5016 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4" exitCode=0
Dec 11 10:37:45 crc kubenswrapper[5016]: I1211 10:37:45.605215 5016 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43" exitCode=0
Dec 11 10:37:45 crc kubenswrapper[5016]: I1211 10:37:45.605223 5016 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166" exitCode=2
Dec 11 10:37:46 crc kubenswrapper[5016]: I1211 10:37:46.380181 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:37:46 crc kubenswrapper[5016]: I1211 10:37:46.380261 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 11 10:37:46 crc kubenswrapper[5016]: E1211 10:37:46.802611 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:37:46Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:37:46Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:37:46Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:37:46Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:46 crc kubenswrapper[5016]: E1211 10:37:46.803142 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:46 crc kubenswrapper[5016]: E1211 10:37:46.805800 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:46 crc kubenswrapper[5016]: E1211 10:37:46.806163 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:46 crc kubenswrapper[5016]: E1211 10:37:46.806443 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:46 crc kubenswrapper[5016]: E1211 10:37:46.806463 5016 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Dec 11 10:37:47 crc kubenswrapper[5016]: I1211 10:37:47.316838 5016 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-4k8l5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.24:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 10:37:47 crc kubenswrapper[5016]: I1211 10:37:47.317181 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.24:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:37:47 crc kubenswrapper[5016]: E1211 10:37:47.317593 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event=<
Dec 11 10:37:47 crc kubenswrapper[5016]: 	&Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Dec 11 10:37:47 crc kubenswrapper[5016]: 	body:
Dec 11 10:37:47 crc kubenswrapper[5016]: 	,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Dec 11 10:37:47 crc kubenswrapper[5016]: 	>
Dec 11 10:37:48 crc kubenswrapper[5016]: I1211 10:37:48.622542 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Dec 11 10:37:48 crc kubenswrapper[5016]: I1211 10:37:48.624572 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Dec 11 10:37:48 crc kubenswrapper[5016]: I1211 10:37:48.625561 5016 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498" exitCode=0
Dec 11 10:37:50 crc kubenswrapper[5016]: E1211 10:37:50.861557 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event=<
Dec 11 10:37:50 crc kubenswrapper[5016]: 	&Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Dec 11 10:37:50 crc kubenswrapper[5016]: 	body:
Dec 11 10:37:50 crc kubenswrapper[5016]: 	,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Dec 11 10:37:50 crc kubenswrapper[5016]: 	>
Dec 11 10:37:53 crc kubenswrapper[5016]: E1211 10:37:53.102541 5016 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:53 crc kubenswrapper[5016]: E1211 10:37:53.103715 5016 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:53 crc kubenswrapper[5016]: E1211 10:37:53.104752 5016 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:53 crc kubenswrapper[5016]: E1211 10:37:53.105456 5016 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:53 crc kubenswrapper[5016]: E1211 10:37:53.105903 5016 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:53 crc kubenswrapper[5016]: I1211 10:37:53.105974 5016 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Dec 11 10:37:53 crc kubenswrapper[5016]: E1211 10:37:53.106477 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="200ms"
Dec 11 10:37:53 crc kubenswrapper[5016]: E1211 10:37:53.307894 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="400ms"
Dec 11 10:37:53 crc kubenswrapper[5016]: I1211 10:37:53.476956 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:53 crc kubenswrapper[5016]: I1211 10:37:53.477259 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:53 crc kubenswrapper[5016]: E1211 10:37:53.709527 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="800ms"
Dec 11 10:37:54 crc kubenswrapper[5016]: E1211 10:37:54.511631 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="1.6s"
Dec 11 10:37:56 crc kubenswrapper[5016]: E1211 10:37:56.112650 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="3.2s"
Dec 11 10:37:56 crc kubenswrapper[5016]: I1211 10:37:56.379272 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:37:56 crc kubenswrapper[5016]: I1211 10:37:56.379670 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 11 10:37:57 crc kubenswrapper[5016]: E1211 10:37:57.030853 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:37:57Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:37:57Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:37:57Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:37:57Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:57 crc kubenswrapper[5016]: E1211 10:37:57.033501 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:57 crc kubenswrapper[5016]: E1211 10:37:57.033752 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:57 crc kubenswrapper[5016]: E1211 10:37:57.034004 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:57 crc kubenswrapper[5016]: E1211 10:37:57.034168 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:37:57 crc kubenswrapper[5016]: E1211 10:37:57.034187 5016 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Dec 11 10:37:57 crc kubenswrapper[5016]: I1211 10:37:57.317052 5016 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-4k8l5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.24:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 10:37:57 crc kubenswrapper[5016]: I1211 10:37:57.317142 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.24:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:37:59 crc kubenswrapper[5016]: E1211 10:37:59.314108 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="6.4s"
Dec 11 10:37:59 crc kubenswrapper[5016]: I1211 10:37:59.501609 5016 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body=
Dec 11 10:37:59 crc kubenswrapper[5016]: I1211 10:37:59.501682 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused"
Dec 11 10:38:00 crc kubenswrapper[5016]: E1211 10:38:00.863034 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event=<
Dec 11 10:38:00 crc kubenswrapper[5016]: 	&Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Dec 11 10:38:00 crc kubenswrapper[5016]: 	body:
Dec 11 10:38:00 crc kubenswrapper[5016]: 	,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Dec 11 10:38:00 crc kubenswrapper[5016]: 	>
Dec 11 10:38:02 crc kubenswrapper[5016]: I1211 10:38:02.485411 5016 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body=
Dec 11 10:38:02 crc kubenswrapper[5016]: I1211 10:38:02.486054 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused"
Dec 11 10:38:03 crc kubenswrapper[5016]: I1211 10:38:03.477890 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:03 crc kubenswrapper[5016]: I1211 10:38:03.478178 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:05 crc kubenswrapper[5016]: E1211 10:38:05.471392 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Dec 11 10:38:05 crc kubenswrapper[5016]: E1211 10:38:05.471847 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xqbf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-rzcjf_openshift-marketplace(89fda315-d1f2-484a-aa91-ec75f0b0227e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 11 10:38:05 crc kubenswrapper[5016]: E1211 10:38:05.473024 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-rzcjf" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e"
Dec 11 10:38:05 crc kubenswrapper[5016]: E1211 10:38:05.715143 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s"
Dec 11 10:38:05 crc kubenswrapper[5016]: I1211 10:38:05.734395 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Dec 11 10:38:05 crc kubenswrapper[5016]: I1211 10:38:05.734451 5016 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2" exitCode=1
Dec 11 10:38:05 crc kubenswrapper[5016]: I1211 10:38:05.734480 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2"}
Dec 11 10:38:05 crc kubenswrapper[5016]: I1211 10:38:05.735161 5016 scope.go:117] "RemoveContainer" containerID="1385802c1d95ac0be62b7c129386f67aba106d73eeebaa9c9a3529d62be412e2"
Dec 11 10:38:05 crc kubenswrapper[5016]: I1211 10:38:05.735831 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:05 crc kubenswrapper[5016]: I1211 10:38:05.736680 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:05 crc kubenswrapper[5016]: I1211 10:38:05.737239 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:06 crc kubenswrapper[5016]: I1211 10:38:06.360699 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:38:06 crc kubenswrapper[5016]: I1211 10:38:06.379175 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:38:06 crc kubenswrapper[5016]: I1211 10:38:06.379241 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 11 10:38:07 crc kubenswrapper[5016]: E1211 10:38:07.241652 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:07Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:07Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:07Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:07Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:07 crc kubenswrapper[5016]: E1211 10:38:07.242152 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:07 crc kubenswrapper[5016]: E1211 10:38:07.242332 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:07 crc kubenswrapper[5016]: E1211 10:38:07.242523 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:07 crc kubenswrapper[5016]: E1211 10:38:07.242765 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:07 crc kubenswrapper[5016]: E1211 10:38:07.242780 5016 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Dec 11 10:38:07 crc kubenswrapper[5016]: I1211 10:38:07.317535 5016 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-4k8l5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.24:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 10:38:07 crc kubenswrapper[5016]: I1211 10:38:07.317606 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.24:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:38:07 crc kubenswrapper[5016]: I1211 10:38:07.317711 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5"
Dec 11 10:38:07 crc kubenswrapper[5016]: I1211 10:38:07.318244 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:07 crc kubenswrapper[5016]: I1211 10:38:07.318793 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:07 crc kubenswrapper[5016]: I1211 10:38:07.319256 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:07 crc kubenswrapper[5016]: I1211 10:38:07.319508 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:09 crc kubenswrapper[5016]: I1211 10:38:09.501459 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:38:10 crc kubenswrapper[5016]: E1211 10:38:10.865254 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event=<
Dec 11 10:38:10 crc kubenswrapper[5016]: 	&Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Dec 11 10:38:10 crc kubenswrapper[5016]: 	body:
Dec 11 10:38:10 crc kubenswrapper[5016]: 	,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Dec 11 10:38:10 crc kubenswrapper[5016]: 	>
Dec 11 10:38:12 crc kubenswrapper[5016]: I1211 10:38:12.485282 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 11 10:38:12 crc kubenswrapper[5016]: E1211 10:38:12.716251 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s"
Dec 11 10:38:12 crc kubenswrapper[5016]: I1211 10:38:12.933501 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:38:12 crc kubenswrapper[5016]: I1211 10:38:12.933596 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:38:13 crc kubenswrapper[5016]: I1211 10:38:13.478374 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:13 crc kubenswrapper[5016]: I1211 10:38:13.480164 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:13 crc kubenswrapper[5016]: I1211 10:38:13.480739 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:13 crc kubenswrapper[5016]: I1211 10:38:13.481082 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:16 crc kubenswrapper[5016]: I1211 10:38:16.379495 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:38:16 crc kubenswrapper[5016]: I1211 10:38:16.379561 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 11 10:38:17 crc kubenswrapper[5016]: I1211 10:38:17.317361 5016 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-4k8l5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.24:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 10:38:17 crc kubenswrapper[5016]: I1211 10:38:17.317821 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.24:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:38:18 crc kubenswrapper[5016]: I1211 10:38:18.475681 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:18 crc kubenswrapper[5016]: I1211 10:38:18.476797 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:18 crc kubenswrapper[5016]: I1211 10:38:18.477634 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:18 crc kubenswrapper[5016]: I1211 10:38:18.478243 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:18 crc kubenswrapper[5016]: I1211 10:38:18.478851 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:18 crc kubenswrapper[5016]: E1211 10:38:18.518279 5016 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.53:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" volumeName="registry-storage"
Dec 11 10:38:19 crc kubenswrapper[5016]: E1211 10:38:19.717572 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s"
Dec 11 10:38:20 crc kubenswrapper[5016]: E1211 10:38:20.866709 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event=<
Dec 11 10:38:20 crc kubenswrapper[5016]: 	&Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Dec 11 10:38:20 crc kubenswrapper[5016]: 	body:
Dec 11 10:38:20 crc kubenswrapper[5016]: 	,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Dec 11 10:38:20 crc kubenswrapper[5016]: 	>
Dec 11 10:38:23 crc kubenswrapper[5016]: I1211 10:38:23.480107 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:23 crc kubenswrapper[5016]: I1211 10:38:23.480667 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:23 crc kubenswrapper[5016]: I1211 10:38:23.481184 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:23 crc kubenswrapper[5016]: I1211 10:38:23.482831 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:23 crc kubenswrapper[5016]: I1211 10:38:23.483263 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:26 crc kubenswrapper[5016]: I1211 10:38:26.380839 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Dec 11 10:38:26 crc kubenswrapper[5016]: I1211 10:38:26.381486 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused"
Dec 11 10:38:26 crc kubenswrapper[5016]: E1211 10:38:26.718410 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s"
Dec 11 10:38:27 crc kubenswrapper[5016]: I1211 10:38:27.316765 5016 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-4k8l5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.24:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 11 10:38:27 crc kubenswrapper[5016]: I1211 10:38:27.317155 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.24:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:38:27 crc kubenswrapper[5016]: E1211 10:38:27.646099 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:27Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:27Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:27Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:27Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:27 crc kubenswrapper[5016]: E1211 10:38:27.646679 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:27 crc kubenswrapper[5016]: E1211 10:38:27.647059 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:27 crc kubenswrapper[5016]: E1211 10:38:27.647361 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:27 crc kubenswrapper[5016]: E1211 10:38:27.647624 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:27 crc kubenswrapper[5016]: E1211 10:38:27.647654 5016 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Dec 11 10:38:30 crc kubenswrapper[5016]: E1211 10:38:30.869839 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event=<
Dec 11 10:38:30 crc kubenswrapper[5016]: 	&Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Dec 11 10:38:30 crc kubenswrapper[5016]: 	body:
Dec 11 10:38:30 crc kubenswrapper[5016]: 	,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Dec 11 10:38:30 crc kubenswrapper[5016]: 	>
Dec 11 10:38:32 crc kubenswrapper[5016]: I1211 10:38:32.910931 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-network-node-identity_network-node-identity-vrzqb_ef543e1b-8068-4ea3-b32a-61027b32e95d/approver/0.log"
Dec 11 10:38:32 crc kubenswrapper[5016]: I1211 10:38:32.911335 5016 generic.go:334] "Generic (PLEG): container finished" podID="ef543e1b-8068-4ea3-b32a-61027b32e95d" containerID="c4fe6012132bf0a523ec1352b1daf4f8c517c373d7797ad2a4048d816aafa66a" exitCode=1
Dec 11 10:38:32 crc kubenswrapper[5016]: I1211 10:38:32.911414 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerDied","Data":"c4fe6012132bf0a523ec1352b1daf4f8c517c373d7797ad2a4048d816aafa66a"}
Dec 11 10:38:32 crc kubenswrapper[5016]: I1211 10:38:32.912461 5016 scope.go:117] "RemoveContainer" containerID="c4fe6012132bf0a523ec1352b1daf4f8c517c373d7797ad2a4048d816aafa66a"
Dec 11 10:38:32 crc kubenswrapper[5016]: I1211 10:38:32.912766 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused"
Dec 11 10:38:32 crc kubenswrapper[5016]: 
I1211 10:38:32.913733 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:32 crc kubenswrapper[5016]: I1211 10:38:32.914012 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:32 crc kubenswrapper[5016]: I1211 10:38:32.914243 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:32 crc kubenswrapper[5016]: I1211 10:38:32.914538 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:32 crc kubenswrapper[5016]: I1211 10:38:32.914802 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:33 crc kubenswrapper[5016]: I1211 10:38:33.477962 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:33 crc kubenswrapper[5016]: I1211 10:38:33.478569 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:33 crc kubenswrapper[5016]: I1211 10:38:33.479212 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:33 crc kubenswrapper[5016]: I1211 10:38:33.480186 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:33 crc kubenswrapper[5016]: I1211 10:38:33.480604 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:33 crc kubenswrapper[5016]: I1211 10:38:33.480988 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:33 crc kubenswrapper[5016]: E1211 10:38:33.719817 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s" Dec 11 10:38:36 crc kubenswrapper[5016]: E1211 10:38:36.142931 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 11 10:38:36 crc kubenswrapper[5016]: E1211 10:38:36.143517 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tm8r2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-tp5lv_openshift-marketplace(7242e8c3-6ed6-4613-8fc9-1339be494e56): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 11 10:38:36 crc 
kubenswrapper[5016]: E1211 10:38:36.145189 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-tp5lv" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.186968 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.188127 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.188631 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.188869 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.189158 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.189421 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.189784 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.199068 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.199898 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.200785 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.201383 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.201808 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.202316 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.202715 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.297622 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-provider-selection\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.297677 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eda32bb5-f0ba-418d-936d-8f3909f5d759-kubelet-dir\") pod \"eda32bb5-f0ba-418d-936d-8f3909f5d759\" (UID: \"eda32bb5-f0ba-418d-936d-8f3909f5d759\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.297699 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-router-certs\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.297766 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/eda32bb5-f0ba-418d-936d-8f3909f5d759-var-lock\") pod \"eda32bb5-f0ba-418d-936d-8f3909f5d759\" (UID: \"eda32bb5-f0ba-418d-936d-8f3909f5d759\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.297831 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-service-ca\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.297855 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-session\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.297866 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eda32bb5-f0ba-418d-936d-8f3909f5d759-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "eda32bb5-f0ba-418d-936d-8f3909f5d759" (UID: "eda32bb5-f0ba-418d-936d-8f3909f5d759"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.297880 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eda32bb5-f0ba-418d-936d-8f3909f5d759-var-lock" (OuterVolumeSpecName: "var-lock") pod "eda32bb5-f0ba-418d-936d-8f3909f5d759" (UID: "eda32bb5-f0ba-418d-936d-8f3909f5d759"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.297898 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-audit-policies\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.298118 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-login\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.298219 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzb6q\" (UniqueName: \"kubernetes.io/projected/a91554fe-759f-4f9a-9d88-7b4d8650a08b-kube-api-access-tzb6q\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.298261 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-idp-0-file-data\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.298288 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a91554fe-759f-4f9a-9d88-7b4d8650a08b-audit-dir\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.298333 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-serving-cert\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.298377 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-cliconfig\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.298405 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-trusted-ca-bundle\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.298480 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-ocp-branding-template\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.298516 5016 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-error\") pod \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\" (UID: \"a91554fe-759f-4f9a-9d88-7b4d8650a08b\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.298561 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eda32bb5-f0ba-418d-936d-8f3909f5d759-kube-api-access\") pod \"eda32bb5-f0ba-418d-936d-8f3909f5d759\" (UID: \"eda32bb5-f0ba-418d-936d-8f3909f5d759\") " Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.298581 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a91554fe-759f-4f9a-9d88-7b4d8650a08b-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.298873 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.299127 5016 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eda32bb5-f0ba-418d-936d-8f3909f5d759-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.299149 5016 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/eda32bb5-f0ba-418d-936d-8f3909f5d759-var-lock\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.299163 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.299177 5016 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a91554fe-759f-4f9a-9d88-7b4d8650a08b-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.299179 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.299788 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.300533 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.304262 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a91554fe-759f-4f9a-9d88-7b4d8650a08b-kube-api-access-tzb6q" (OuterVolumeSpecName: "kube-api-access-tzb6q") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "kube-api-access-tzb6q". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.304321 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.304632 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eda32bb5-f0ba-418d-936d-8f3909f5d759-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "eda32bb5-f0ba-418d-936d-8f3909f5d759" (UID: "eda32bb5-f0ba-418d-936d-8f3909f5d759"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.304812 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.304986 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.305166 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.305899 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.306172 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.306291 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.307738 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "a91554fe-759f-4f9a-9d88-7b4d8650a08b" (UID: "a91554fe-759f-4f9a-9d88-7b4d8650a08b"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.379314 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.379384 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.400887 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.400981 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.401008 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.401031 5016 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.401050 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.401071 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzb6q\" (UniqueName: \"kubernetes.io/projected/a91554fe-759f-4f9a-9d88-7b4d8650a08b-kube-api-access-tzb6q\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.401092 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.401110 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.401130 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.401149 5016 reconciler_common.go:293] "Volume detached for volume 
\"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.401172 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.401191 5016 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a91554fe-759f-4f9a-9d88-7b4d8650a08b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.401210 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eda32bb5-f0ba-418d-936d-8f3909f5d759-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.941170 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" event={"ID":"a91554fe-759f-4f9a-9d88-7b4d8650a08b","Type":"ContainerDied","Data":"31e8b1100249974222875fee566bee010d4107f11e39abb910092b690e0af032"} Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.941215 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.942652 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.943156 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"eda32bb5-f0ba-418d-936d-8f3909f5d759","Type":"ContainerDied","Data":"25d3ac9d6c110c3a4d6abc6a28da18d39912c24f2e9d1e72a3280ccb76144adf"} Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.943199 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25d3ac9d6c110c3a4d6abc6a28da18d39912c24f2e9d1e72a3280ccb76144adf" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.943249 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.943545 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.944024 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.944729 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.945062 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.945323 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.962503 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.962959 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.963254 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.963502 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.963754 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.964027 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.968230 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.968678 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.969126 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.969564 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.969879 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:36 crc kubenswrapper[5016]: I1211 10:38:36.970279 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:38 crc kubenswrapper[5016]: I1211 
10:38:38.935418 5016 scope.go:117] "RemoveContainer" containerID="f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351" Dec 11 10:38:38 crc kubenswrapper[5016]: I1211 10:38:38.969863 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_downloads-7954f5f757-88s4j_ed376fff-5d17-48b1-b48c-ec0c3548dde4/download-server/1.log" Dec 11 10:38:40 crc kubenswrapper[5016]: W1211 10:38:40.114461 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-ea57287a2f68be4fc2a1409c68e8678a35827ea061d4280f88b7e7dc1eb2699f WatchSource:0}: Error finding container ea57287a2f68be4fc2a1409c68e8678a35827ea061d4280f88b7e7dc1eb2699f: Status 404 returned error can't find the container with id ea57287a2f68be4fc2a1409c68e8678a35827ea061d4280f88b7e7dc1eb2699f Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.205066 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.206154 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.206739 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.207275 5016 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.207672 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.208037 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.208855 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.209159 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" 
pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.209462 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.254214 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.254655 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.254739 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.254530 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.254699 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.254866 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.255060 5016 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.255077 5016 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.255087 5016 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 11 10:38:40 crc kubenswrapper[5016]: E1211 10:38:40.721331 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s" Dec 11 10:38:40 crc kubenswrapper[5016]: E1211 10:38:40.872315 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event=< Dec 11 10:38:40 crc kubenswrapper[5016]: &Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) Dec 11 10:38:40 crc kubenswrapper[5016]: body: Dec 11 10:38:40 crc kubenswrapper[5016]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Dec 11 10:38:40 crc kubenswrapper[5016]: > Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.989456 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.991398 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:38:40 crc kubenswrapper[5016]: I1211 10:38:40.992885 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"ea57287a2f68be4fc2a1409c68e8678a35827ea061d4280f88b7e7dc1eb2699f"} Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.007856 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.008068 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.008524 5016 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.009384 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.009921 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.010434 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.010797 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.485814 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.504313 5016 
scope.go:117] "RemoveContainer" containerID="af4cdd2cafc9fea690f4c35a1e0f4de9d56d8a8d9d4a0c7b8889f7d8c20723fc" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.529239 5016 scope.go:117] "RemoveContainer" containerID="31d86d5f7654802bd05a4a9c4d506deb5b9bcf5fd1ae02722093170105a93409" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.547534 5016 scope.go:117] "RemoveContainer" containerID="f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351" Dec 11 10:38:41 crc kubenswrapper[5016]: E1211 10:38:41.548346 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\": container with ID starting with f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351 not found: ID does not exist" containerID="f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.548405 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351"} err="failed to get container status \"f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\": rpc error: code = NotFound desc = could not find container \"f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351\": container with ID starting with f7d28c0c52524d134b41411eadb2a9c0fe38fe9f844fa857d1aab92c9d9d4351 not found: ID does not exist" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.548446 5016 scope.go:117] "RemoveContainer" containerID="933e7a9db707412d1462733ef2c44d96b5f3ad430c2bcfcd2a54f4f4948e1ff4" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.569836 5016 scope.go:117] "RemoveContainer" containerID="803e8d44ce72f577208c0259a586e1dbd0bf1631063822e1ee8620b63dd19e43" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.589617 5016 scope.go:117] "RemoveContainer" containerID="8f581f8e01f83c014d5cb300316ada1c18124871b397338457abfca6779ef166" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.603292 5016 scope.go:117] "RemoveContainer" containerID="05a03d238d8edc25742be03aaf02c47fb102ae8ba550b9e793753cc7c2e8d498" Dec 11 10:38:41 crc kubenswrapper[5016]: I1211 10:38:41.623331 5016 scope.go:117] "RemoveContainer" containerID="2f0697d9f898dad5327dd0844f16217c8b8cb5e9621b3fe10e2cce9d0731df6e" Dec 11 10:38:42 crc kubenswrapper[5016]: I1211 10:38:42.933240 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:38:42 crc kubenswrapper[5016]: I1211 10:38:42.934219 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:38:42 crc kubenswrapper[5016]: I1211 10:38:42.934308 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 10:38:42 crc kubenswrapper[5016]: I1211 10:38:42.934931 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"b7028fe427b7682d3e5b7f2a5e7fedee9c12ebb5f609d4c361ba3d5fed28bee0"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 10:38:42 crc kubenswrapper[5016]: I1211 10:38:42.935012 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://b7028fe427b7682d3e5b7f2a5e7fedee9c12ebb5f609d4c361ba3d5fed28bee0" gracePeriod=600 Dec 11 10:38:43 crc kubenswrapper[5016]: I1211 10:38:43.490014 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:43 crc kubenswrapper[5016]: I1211 10:38:43.490666 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:43 crc kubenswrapper[5016]: I1211 10:38:43.491324 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:43 crc kubenswrapper[5016]: I1211 10:38:43.492017 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:43 crc kubenswrapper[5016]: I1211 10:38:43.492339 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:43 crc kubenswrapper[5016]: I1211 10:38:43.492743 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:44 crc kubenswrapper[5016]: I1211 10:38:44.018570 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c6sdb" event={"ID":"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7","Type":"ContainerStarted","Data":"b5717445e8a0a0a5666d17fa7c7d1a4950c927eafdac25bf230cfaa389b392fc"} Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.027327 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-rx8bv" event={"ID":"2f07c0be-3ff2-4b4a-86f1-67da5394f101","Type":"ContainerStarted","Data":"1c79ed5e5e8ae4c87ce81651d95475d7326df4f01182b3b46ae35a94f899301c"} Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.030567 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_downloads-7954f5f757-88s4j_ed376fff-5d17-48b1-b48c-ec0c3548dde4/download-server/1.log" Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.030690 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-88s4j" event={"ID":"ed376fff-5d17-48b1-b48c-ec0c3548dde4","Type":"ContainerStarted","Data":"166c97fb2f03830534b76bdcfd7d8cba5dec9992ad224301346bf04dd6b5b2dd"} Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.032462 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vb25j" event={"ID":"edb91373-b8a5-4426-9a6b-1fbb6c9f2846","Type":"ContainerStarted","Data":"ccfdfec75c4e77a73d5caab4411aa800938e0d4074f1dfff044d49a9b74b4566"} Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.034141 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hqmxw" event={"ID":"f393088a-dacc-4673-8074-d6be25842a84","Type":"ContainerStarted","Data":"d03c7a1c53056a349d7658275d508f3cfd4d2aa81ffeb9d7e8e4319df8a11ac3"} Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.035818 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r5rgf" event={"ID":"623ddc04-83e2-42ac-bcac-59b72d2fac2a","Type":"ContainerStarted","Data":"733df02a42dee187fdc183518fac16020eb95499f4b1cfc4062760e56d363f32"} Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.038296 5016 generic.go:334] "Generic (PLEG): container finished" podID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" containerID="b5717445e8a0a0a5666d17fa7c7d1a4950c927eafdac25bf230cfaa389b392fc" exitCode=0 Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.038371 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c6sdb" event={"ID":"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7","Type":"ContainerDied","Data":"b5717445e8a0a0a5666d17fa7c7d1a4950c927eafdac25bf230cfaa389b392fc"} Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.040083 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc9qn" event={"ID":"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4","Type":"ContainerStarted","Data":"b431768e4cd8d8a61fe495ce85d6971147adc545649c29e350f2fa7ac028c6ad"} Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.043836 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.043919 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"721dd06a42f948148349dc6d282a7f4665689193d51caafdb45a21ac3a21d27c"} Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.046602 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-network-node-identity_network-node-identity-vrzqb_ef543e1b-8068-4ea3-b32a-61027b32e95d/approver/0.log" Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.047240 5016 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"723748ad6a89de4ac6c1375fa6ff2c2fb8844edb0fd2b061b606979b71b804ee"} Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.049464 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"fb8820d3d9ee638e20951da811b9d705c1a02a15688cacce52745491124d8557"} Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.051974 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="b7028fe427b7682d3e5b7f2a5e7fedee9c12ebb5f609d4c361ba3d5fed28bee0" exitCode=0 Dec 11 10:38:45 crc kubenswrapper[5016]: I1211 10:38:45.052022 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"b7028fe427b7682d3e5b7f2a5e7fedee9c12ebb5f609d4c361ba3d5fed28bee0"} Dec 11 10:38:46 crc kubenswrapper[5016]: I1211 10:38:46.380020 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:38:46 crc kubenswrapper[5016]: I1211 10:38:46.380099 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:38:46 crc kubenswrapper[5016]: I1211 10:38:46.473570 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:38:46 crc kubenswrapper[5016]: I1211 10:38:46.474306 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:46 crc kubenswrapper[5016]: I1211 10:38:46.474709 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:46 crc kubenswrapper[5016]: I1211 10:38:46.475094 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:46 crc kubenswrapper[5016]: I1211 10:38:46.475591 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:46 crc kubenswrapper[5016]: I1211 10:38:46.475975 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:46 crc kubenswrapper[5016]: I1211 10:38:46.476242 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:46 crc kubenswrapper[5016]: I1211 10:38:46.488941 5016 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="071ea1a0-65ea-49d7-a4b1-0f8a312c0112" Dec 11 10:38:46 crc kubenswrapper[5016]: I1211 10:38:46.489018 5016 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="071ea1a0-65ea-49d7-a4b1-0f8a312c0112" Dec 11 10:38:46 crc kubenswrapper[5016]: E1211 10:38:46.489388 5016 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:38:46 crc kubenswrapper[5016]: I1211 10:38:46.489844 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:38:47 crc kubenswrapper[5016]: I1211 10:38:47.067546 5016 generic.go:334] "Generic (PLEG): container finished" podID="f393088a-dacc-4673-8074-d6be25842a84" containerID="d03c7a1c53056a349d7658275d508f3cfd4d2aa81ffeb9d7e8e4319df8a11ac3" exitCode=0 Dec 11 10:38:47 crc kubenswrapper[5016]: I1211 10:38:47.067642 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hqmxw" event={"ID":"f393088a-dacc-4673-8074-d6be25842a84","Type":"ContainerDied","Data":"d03c7a1c53056a349d7658275d508f3cfd4d2aa81ffeb9d7e8e4319df8a11ac3"} Dec 11 10:38:47 crc kubenswrapper[5016]: I1211 10:38:47.069873 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"471ea8d8d987568918328b004110679e9906c9084e548b86cf6c9b7e4754418b"} Dec 11 10:38:47 crc kubenswrapper[5016]: I1211 10:38:47.477322 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:47 crc kubenswrapper[5016]: I1211 10:38:47.478988 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:47 crc kubenswrapper[5016]: I1211 10:38:47.479451 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:47 crc kubenswrapper[5016]: I1211 10:38:47.481246 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:47 crc kubenswrapper[5016]: E1211 10:38:47.481715 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-tp5lv" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" Dec 11 10:38:47 crc kubenswrapper[5016]: I1211 10:38:47.481805 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:47 crc kubenswrapper[5016]: I1211 10:38:47.482679 5016 status_manager.go:851] "Failed to get status for pod" 
podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:47 crc kubenswrapper[5016]: I1211 10:38:47.483458 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:47 crc kubenswrapper[5016]: E1211 10:38:47.722567 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s" Dec 11 10:38:47 crc kubenswrapper[5016]: E1211 10:38:47.913717 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:47Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:47Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:47Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:47Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[],\\\"sizeBytes\\\":1202228571},{\\\"names\\\":[],\\\"sizeBytes\\\":1154573130},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketp
lace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6
d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeB
ytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:47 crc kubenswrapper[5016]: E1211 10:38:47.914265 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:47 crc kubenswrapper[5016]: E1211 10:38:47.914979 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:47 crc kubenswrapper[5016]: E1211 10:38:47.915403 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:47 crc kubenswrapper[5016]: E1211 10:38:47.915761 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:47 crc kubenswrapper[5016]: E1211 10:38:47.915924 5016 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 10:38:50 crc kubenswrapper[5016]: I1211 10:38:50.094087 5016 generic.go:334] "Generic (PLEG): container finished" podID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" containerID="ccfdfec75c4e77a73d5caab4411aa800938e0d4074f1dfff044d49a9b74b4566" exitCode=0 Dec 11 10:38:50 crc kubenswrapper[5016]: I1211 10:38:50.094148 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vb25j" event={"ID":"edb91373-b8a5-4426-9a6b-1fbb6c9f2846","Type":"ContainerDied","Data":"ccfdfec75c4e77a73d5caab4411aa800938e0d4074f1dfff044d49a9b74b4566"} Dec 11 10:38:50 crc kubenswrapper[5016]: E1211 10:38:50.873536 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event=< Dec 11 10:38:50 crc kubenswrapper[5016]: &Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) Dec 11 10:38:50 crc kubenswrapper[5016]: body: Dec 11 10:38:50 crc kubenswrapper[5016]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Dec 11 10:38:50 crc kubenswrapper[5016]: > Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.106689 5016 generic.go:334] "Generic (PLEG): container finished" podID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" containerID="733df02a42dee187fdc183518fac16020eb95499f4b1cfc4062760e56d363f32" exitCode=0 Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.106780 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r5rgf" event={"ID":"623ddc04-83e2-42ac-bcac-59b72d2fac2a","Type":"ContainerDied","Data":"733df02a42dee187fdc183518fac16020eb95499f4b1cfc4062760e56d363f32"} Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.109437 5016 generic.go:334] "Generic (PLEG): container finished" podID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" containerID="b431768e4cd8d8a61fe495ce85d6971147adc545649c29e350f2fa7ac028c6ad" exitCode=0 Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.109486 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc9qn" event={"ID":"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4","Type":"ContainerDied","Data":"b431768e4cd8d8a61fe495ce85d6971147adc545649c29e350f2fa7ac028c6ad"} Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.111632 5016 generic.go:334] "Generic (PLEG): container finished" podID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" containerID="1c79ed5e5e8ae4c87ce81651d95475d7326df4f01182b3b46ae35a94f899301c" exitCode=0 Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.111666 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rx8bv" event={"ID":"2f07c0be-3ff2-4b4a-86f1-67da5394f101","Type":"ContainerDied","Data":"1c79ed5e5e8ae4c87ce81651d95475d7326df4f01182b3b46ae35a94f899301c"} Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.112894 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.113377 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.113934 5016 status_manager.go:851] "Failed to get status for pod" 
podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.114686 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.115097 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.115489 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.115856 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:51 crc kubenswrapper[5016]: I1211 10:38:51.116214 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:52 crc kubenswrapper[5016]: I1211 10:38:52.118091 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:52 crc kubenswrapper[5016]: I1211 10:38:52.118913 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:52 crc kubenswrapper[5016]: I1211 10:38:52.119443 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:52 
crc kubenswrapper[5016]: I1211 10:38:52.119772 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:52 crc kubenswrapper[5016]: I1211 10:38:52.120255 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:52 crc kubenswrapper[5016]: I1211 10:38:52.120731 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:52 crc kubenswrapper[5016]: I1211 10:38:52.121096 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:52 crc kubenswrapper[5016]: I1211 10:38:52.121380 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:52 crc kubenswrapper[5016]: I1211 10:38:52.121594 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.126138 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.127438 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.128459 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.128749 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.129369 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.130056 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.130486 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.130976 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.131432 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.132719 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.484019 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.485797 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" 
pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.486332 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.486665 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.487215 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.487664 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.488260 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.488811 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.489410 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.489931 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:53 crc kubenswrapper[5016]: I1211 10:38:53.490503 5016 status_manager.go:851] "Failed to get status for 
pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:54 crc kubenswrapper[5016]: E1211 10:38:54.724648 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.140231 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.140230 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.140282 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.141557 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.141797 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.141993 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.142296 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.142821 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" 
pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.144079 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.144461 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.145081 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.145551 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.146102 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.146478 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.146960 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.147491 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.147689 5016 status_manager.go:851] "Failed to get status for pod" 
podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.147839 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.148041 5016 status_manager.go:851] "Failed to get status for pod" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" pod="openshift-console/downloads-7954f5f757-88s4j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-88s4j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.148212 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.148389 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.148572 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.148733 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.148890 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.149112 5016 status_manager.go:851] "Failed to get status for pod" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" pod="openshift-marketplace/community-operators-r5rgf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-r5rgf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.149268 5016 
status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.149423 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.149571 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.149766 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:55 crc kubenswrapper[5016]: I1211 10:38:55.149973 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.149582 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"83b3c470b285b2b57bf30d006223b6f9a3a812915c32fc1f6a9b8d90fd4fd77c"} Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.361744 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.366484 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.367234 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.367707 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.368297 5016 status_manager.go:851] "Failed to get status for pod" 
podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.368657 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.369079 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.369438 5016 status_manager.go:851] "Failed to get status for pod" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" pod="openshift-console/downloads-7954f5f757-88s4j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-88s4j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.369676 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.369929 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.370277 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.370567 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.370820 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.371061 
5016 status_manager.go:851] "Failed to get status for pod" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" pod="openshift-marketplace/community-operators-r5rgf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-r5rgf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.371290 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.371527 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.371747 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.379519 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-88s4j" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.379991 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.380057 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.380072 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.380169 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.380491 5016 patch_prober.go:28] interesting pod/downloads-7954f5f757-88s4j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Dec 11 10:38:56 crc kubenswrapper[5016]: I1211 10:38:56.380564 5016 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-console/downloads-7954f5f757-88s4j" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.158577 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.163783 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.164651 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.165235 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.165925 5016 status_manager.go:851] "Failed to get status for pod" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" pod="openshift-console/downloads-7954f5f757-88s4j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-88s4j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.166371 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.166800 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.167329 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.167833 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.168611 5016 
status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.169045 5016 status_manager.go:851] "Failed to get status for pod" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" pod="openshift-marketplace/community-operators-r5rgf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-r5rgf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.169548 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.170117 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.170492 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.170980 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.171449 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:57 crc kubenswrapper[5016]: I1211 10:38:57.172039 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:58 crc kubenswrapper[5016]: E1211 10:38:58.047225 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:58Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:58Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:58Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:38:58Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[],\\\"sizeBytes\\\":1202228571},{\\\"names\\\":[],\\\"sizeBytes\\\":1154573130},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490
370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256
:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:58 crc kubenswrapper[5016]: E1211 10:38:58.048313 5016 kubelet_node_status.go:585] "Error updating 
node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:58 crc kubenswrapper[5016]: E1211 10:38:58.048771 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:58 crc kubenswrapper[5016]: E1211 10:38:58.049218 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:58 crc kubenswrapper[5016]: E1211 10:38:58.049734 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:38:58 crc kubenswrapper[5016]: E1211 10:38:58.049776 5016 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 10:39:00 crc kubenswrapper[5016]: E1211 10:39:00.875278 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event=< Dec 11 10:39:00 crc kubenswrapper[5016]: &Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) Dec 11 10:39:00 crc kubenswrapper[5016]: body: Dec 11 10:39:00 crc kubenswrapper[5016]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Dec 11 10:39:00 crc kubenswrapper[5016]: > Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.182604 5016 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="071ea1a0-65ea-49d7-a4b1-0f8a312c0112" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.183010 5016 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="071ea1a0-65ea-49d7-a4b1-0f8a312c0112" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.183075 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.183418 5016 status_manager.go:851] "Failed to get status for pod" 
podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" pod="openshift-marketplace/community-operators-r5rgf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-r5rgf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: E1211 10:39:01.183627 5016 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.183826 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.184502 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.185242 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.185610 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.185914 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.186312 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.186643 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.187003 5016 status_manager.go:851] "Failed to get status for pod" 
podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.187335 5016 status_manager.go:851] "Failed to get status for pod" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" pod="openshift-console/downloads-7954f5f757-88s4j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-88s4j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.187676 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.187914 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.188218 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.188570 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.475498 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.476250 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.477496 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 
10:39:01.478107 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.478389 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.478715 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.479037 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.479350 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.479589 5016 status_manager.go:851] "Failed to get status for pod" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" pod="openshift-console/downloads-7954f5f757-88s4j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-88s4j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.479832 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.480034 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.480196 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 
10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.480384 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.480680 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: I1211 10:39:01.480999 5016 status_manager.go:851] "Failed to get status for pod" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" pod="openshift-marketplace/community-operators-r5rgf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-r5rgf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:01 crc kubenswrapper[5016]: E1211 10:39:01.726013 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.229198 5016 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="83b3c470b285b2b57bf30d006223b6f9a3a812915c32fc1f6a9b8d90fd4fd77c" exitCode=0 Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.229286 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"83b3c470b285b2b57bf30d006223b6f9a3a812915c32fc1f6a9b8d90fd4fd77c"} Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.229889 5016 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="071ea1a0-65ea-49d7-a4b1-0f8a312c0112" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.229904 5016 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="071ea1a0-65ea-49d7-a4b1-0f8a312c0112" Dec 11 10:39:03 crc kubenswrapper[5016]: E1211 10:39:03.230860 5016 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.231023 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.231611 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.232046 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.232500 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.232876 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.233378 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.233422 5016 generic.go:334] "Generic (PLEG): container finished" podID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerID="c906b2e6c1fdf3d3c54425c0ce75c9e2602c7ad43116b532768fcb1075c0b67d" exitCode=0 Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.233496 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" event={"ID":"d8539d49-e453-4b15-a4d6-0e0583b93390","Type":"ContainerDied","Data":"c906b2e6c1fdf3d3c54425c0ce75c9e2602c7ad43116b532768fcb1075c0b67d"} Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.233777 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.233906 5016 scope.go:117] "RemoveContainer" containerID="c906b2e6c1fdf3d3c54425c0ce75c9e2602c7ad43116b532768fcb1075c0b67d" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.234377 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.234655 5016 status_manager.go:851] "Failed to get status for pod" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" pod="openshift-console/downloads-7954f5f757-88s4j" 
err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-88s4j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.234973 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.235285 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.235491 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.235762 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.236104 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.236354 5016 status_manager.go:851] "Failed to get status for pod" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" pod="openshift-marketplace/community-operators-r5rgf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-r5rgf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.236811 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.237073 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.237291 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"793991a7e6d358bd8fbd2f0bae8254371015f24f8ff9bca5c69c392121b0afd1"} Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.237432 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.237827 5016 status_manager.go:851] "Failed to get status for pod" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" pod="openshift-console/downloads-7954f5f757-88s4j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-88s4j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.238205 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.238694 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.239018 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.239345 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.239787 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.240120 5016 status_manager.go:851] "Failed to get status for pod" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" pod="openshift-marketplace/community-operators-r5rgf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-r5rgf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.240475 5016 status_manager.go:851] "Failed to get status for pod" 
podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-kp5bk\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.240968 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.241282 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.241677 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.242128 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.242548 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.478143 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.479061 5016 status_manager.go:851] "Failed to get status for pod" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" pod="openshift-console/downloads-7954f5f757-88s4j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-88s4j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.479392 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.479770 5016 
status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.480048 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.480262 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.480619 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.480875 5016 status_manager.go:851] "Failed to get status for pod" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" pod="openshift-marketplace/community-operators-r5rgf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-r5rgf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.481356 5016 status_manager.go:851] "Failed to get status for pod" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-kp5bk\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.482044 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.482396 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.482954 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 
10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.483357 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.483689 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.484005 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:03 crc kubenswrapper[5016]: I1211 10:39:03.484239 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.386746 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-88s4j" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.387538 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.387977 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.388617 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.389104 5016 status_manager.go:851] "Failed to get status for pod" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" pod="openshift-console/downloads-7954f5f757-88s4j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-88s4j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.389790 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.390122 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.390459 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.390781 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.391141 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.391482 5016 status_manager.go:851] "Failed to get status for pod" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" pod="openshift-marketplace/community-operators-r5rgf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-r5rgf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.391782 5016 status_manager.go:851] "Failed to get status for pod" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-kp5bk\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.392078 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.392367 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.392679 5016 
status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.393003 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.393352 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.417836 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:39:06 crc kubenswrapper[5016]: I1211 10:39:06.417895 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:39:08 crc kubenswrapper[5016]: E1211 10:39:08.247838 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:39:08Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:39:08Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:39:08Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:39:08Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[],\\\"sizeBytes\\\":1202228571},{\\\"names\\\":[],\\\"sizeBytes\\\":1154573130},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redha
t/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],
\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":47958521
8},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:08 crc kubenswrapper[5016]: E1211 10:39:08.249066 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:08 crc kubenswrapper[5016]: E1211 10:39:08.249755 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:08 crc kubenswrapper[5016]: E1211 10:39:08.250130 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:08 crc kubenswrapper[5016]: E1211 10:39:08.250859 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:08 crc kubenswrapper[5016]: E1211 10:39:08.250993 5016 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 10:39:08 crc kubenswrapper[5016]: E1211 10:39:08.727327 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s" Dec 11 10:39:10 crc kubenswrapper[5016]: E1211 10:39:10.877586 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event=< Dec 11 
10:39:10 crc kubenswrapper[5016]: &Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) Dec 11 10:39:10 crc kubenswrapper[5016]: body: Dec 11 10:39:10 crc kubenswrapper[5016]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Dec 11 10:39:10 crc kubenswrapper[5016]: > Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.479314 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.480548 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.481487 5016 status_manager.go:851] "Failed to get status for pod" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" pod="openshift-console/downloads-7954f5f757-88s4j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-88s4j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.482125 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.482651 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.483222 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.483695 5016 status_manager.go:851] 
"Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.484277 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.484715 5016 status_manager.go:851] "Failed to get status for pod" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" pod="openshift-marketplace/community-operators-r5rgf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-r5rgf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.485144 5016 status_manager.go:851] "Failed to get status for pod" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-kp5bk\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.485837 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.486469 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.486914 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.487368 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: I1211 10:39:13.487969 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:13 crc kubenswrapper[5016]: 
I1211 10:39:13.488284 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.317159 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.317849 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.318643 5016 status_manager.go:851] "Failed to get status for pod" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" pod="openshift-console/downloads-7954f5f757-88s4j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-88s4j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.318968 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.319260 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.319616 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.319869 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.320209 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: 
connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.320680 5016 status_manager.go:851] "Failed to get status for pod" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-2x7t7\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.320919 5016 status_manager.go:851] "Failed to get status for pod" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" pod="openshift-marketplace/community-operators-r5rgf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-r5rgf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.321270 5016 status_manager.go:851] "Failed to get status for pod" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-kp5bk\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.321676 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.321974 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.322241 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.322512 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.322804 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: I1211 10:39:15.323363 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:15 crc kubenswrapper[5016]: E1211 10:39:15.728778 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s" Dec 11 10:39:18 crc kubenswrapper[5016]: E1211 10:39:18.290179 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:39:18Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:39:18Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:39:18Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:39:18Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[],\\\"sizeBytes\\\":1202228571},{\\\"names\\\":[],\\\"sizeBytes\\\":1154573130},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":
[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d
46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf
09\\\"],\\\"sizeBytes\\\":457588564}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:18 crc kubenswrapper[5016]: E1211 10:39:18.293291 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:18 crc kubenswrapper[5016]: E1211 10:39:18.293994 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:18 crc kubenswrapper[5016]: E1211 10:39:18.294372 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:18 crc kubenswrapper[5016]: E1211 10:39:18.294743 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:18 crc kubenswrapper[5016]: E1211 10:39:18.294777 5016 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 10:39:20 crc kubenswrapper[5016]: E1211 10:39:20.878427 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event=< Dec 11 10:39:20 crc kubenswrapper[5016]: &Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) Dec 11 10:39:20 crc kubenswrapper[5016]: body: Dec 11 10:39:20 crc kubenswrapper[5016]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Dec 11 10:39:20 crc kubenswrapper[5016]: > Dec 11 10:39:22 crc kubenswrapper[5016]: E1211 10:39:22.730901 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": 
dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.478610 5016 status_manager.go:851] "Failed to get status for pod" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-2x7t7\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.479158 5016 status_manager.go:851] "Failed to get status for pod" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.479569 5016 status_manager.go:851] "Failed to get status for pod" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" pod="openshift-marketplace/community-operators-r5rgf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-r5rgf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.480056 5016 status_manager.go:851] "Failed to get status for pod" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-kp5bk\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.480445 5016 status_manager.go:851] "Failed to get status for pod" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" pod="openshift-authentication/oauth-openshift-558db77b4-4k8l5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-4k8l5\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.481041 5016 status_manager.go:851] "Failed to get status for pod" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" pod="openshift-marketplace/certified-operators-tp5lv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-tp5lv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.481338 5016 status_manager.go:851] "Failed to get status for pod" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" pod="openshift-marketplace/redhat-operators-rx8bv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rx8bv\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.481731 5016 status_manager.go:851] "Failed to get status for pod" podUID="f393088a-dacc-4673-8074-d6be25842a84" pod="openshift-marketplace/redhat-marketplace-hqmxw" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-hqmxw\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.482381 5016 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.482653 5016 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.483177 5016 status_manager.go:851] "Failed to get status for pod" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" pod="openshift-marketplace/redhat-operators-rzcjf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-rzcjf\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.483692 5016 status_manager.go:851] "Failed to get status for pod" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" pod="openshift-marketplace/community-operators-c6sdb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-c6sdb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.484023 5016 status_manager.go:851] "Failed to get status for pod" podUID="ed376fff-5d17-48b1-b48c-ec0c3548dde4" pod="openshift-console/downloads-7954f5f757-88s4j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-88s4j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.484332 5016 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.484680 5016 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.485006 5016 status_manager.go:851] "Failed to get status for pod" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" pod="openshift-marketplace/certified-operators-fc9qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-fc9qn\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:23 crc kubenswrapper[5016]: I1211 10:39:23.485272 5016 status_manager.go:851] "Failed to get status for pod" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" pod="openshift-marketplace/redhat-marketplace-vb25j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-vb25j\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:28 crc kubenswrapper[5016]: E1211 10:39:28.631278 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch 
status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:39:28Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:39:28Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:39:28Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-11T10:39:28Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[],\\\"sizeBytes\\\":1202228571},{\\\"names\\\":[],\\\"sizeBytes\\\":1154573130},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\"
:852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:29 crc kubenswrapper[5016]: E1211 10:39:28.632329 5016 kubelet_node_status.go:585] "Error 
updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:29 crc kubenswrapper[5016]: E1211 10:39:28.632828 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:29 crc kubenswrapper[5016]: E1211 10:39:28.633176 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:29 crc kubenswrapper[5016]: E1211 10:39:28.633453 5016 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" Dec 11 10:39:29 crc kubenswrapper[5016]: E1211 10:39:28.633475 5016 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 11 10:39:29 crc kubenswrapper[5016]: E1211 10:39:29.732565 5016 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.53:6443: connect: connection refused" interval="7s" Dec 11 10:39:30 crc kubenswrapper[5016]: E1211 10:39:30.880484 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event=< Dec 11 10:39:30 crc kubenswrapper[5016]: &Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) Dec 11 10:39:30 crc kubenswrapper[5016]: body: Dec 11 10:39:30 crc kubenswrapper[5016]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Dec 11 10:39:30 crc kubenswrapper[5016]: > Dec 11 10:39:30 crc kubenswrapper[5016]: E1211 10:39:30.880627 5016 event.go:307] "Unable to write event (retry limit exceeded!)" event=< Dec 11 10:39:30 crc kubenswrapper[5016]: &Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd0570e39 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:ProbeError,Message:Readiness probe error: Get "https://10.217.0.24:6443/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) Dec 11 10:39:30 crc kubenswrapper[5016]: body: Dec 11 10:39:30 crc kubenswrapper[5016]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,LastTimestamp:2025-12-11 10:37:47.317128761 +0000 UTC m=+184.135688340,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Dec 11 10:39:30 crc kubenswrapper[5016]: > Dec 11 10:39:30 crc kubenswrapper[5016]: E1211 10:39:30.881387 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event="&Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd05841bb openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:Unhealthy,Message:Readiness probe failed: Get \"https://10.217.0.24:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317207483 +0000 UTC m=+184.135767062,LastTimestamp:2025-12-11 10:37:47.317207483 +0000 UTC m=+184.135767062,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 11 10:39:31 crc kubenswrapper[5016]: E1211 10:39:31.894341 5016 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.53:6443: connect: connection refused" event="&Event{ObjectMeta:{oauth-openshift-558db77b4-4k8l5.188022efd05841bb openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-558db77b4-4k8l5,UID:a91554fe-759f-4f9a-9d88-7b4d8650a08b,APIVersion:v1,ResourceVersion:27112,FieldPath:spec.containers{oauth-openshift},},Reason:Unhealthy,Message:Readiness probe failed: Get \"https://10.217.0.24:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-11 10:37:47.317207483 +0000 UTC m=+184.135767062,LastTimestamp:2025-12-11 10:37:47.317207483 +0000 UTC m=+184.135767062,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.425118 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.425445 5016 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1" exitCode=0 Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.425517 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1"} Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.426298 5016 scope.go:117] "RemoveContainer" containerID="c9d8e36349c4015e531dc042d1dd357ce5470f7727ce9798c5c5c8d2a6562dc1" Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.428533 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcjf" event={"ID":"89fda315-d1f2-484a-aa91-ec75f0b0227e","Type":"ContainerStarted","Data":"1aa80eb5a334fb04c2d4caad344fa5bc35a2f4815cd89e472726de20a9f109dc"} Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.435215 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"dda241782326efa4c942c144d02e3ac70663dc048035fcd002dd48981538fdeb"} Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.437530 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r5rgf" event={"ID":"623ddc04-83e2-42ac-bcac-59b72d2fac2a","Type":"ContainerStarted","Data":"4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6"} Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.445027 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c6sdb" event={"ID":"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7","Type":"ContainerStarted","Data":"45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e"} Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.446925 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rx8bv" event={"ID":"2f07c0be-3ff2-4b4a-86f1-67da5394f101","Type":"ContainerStarted","Data":"ce79c4c37bd26cf001092c437813df70aea50b475de9e346c630fe3376756330"} Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.448211 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/1.log" Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.448575 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" event={"ID":"d8539d49-e453-4b15-a4d6-0e0583b93390","Type":"ContainerStarted","Data":"ed84fbbb78234326717842bc971216184e3ae7a96479896d7aebbe056f8245a7"} Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.449108 5016 scope.go:117] "RemoveContainer" containerID="ed84fbbb78234326717842bc971216184e3ae7a96479896d7aebbe056f8245a7" Dec 11 10:39:32 crc kubenswrapper[5016]: E1211 10:39:32.449327 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator 
pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.450465 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hqmxw" event={"ID":"f393088a-dacc-4673-8074-d6be25842a84","Type":"ContainerStarted","Data":"d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c"} Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.453523 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vb25j" event={"ID":"edb91373-b8a5-4426-9a6b-1fbb6c9f2846","Type":"ContainerStarted","Data":"1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53"} Dec 11 10:39:32 crc kubenswrapper[5016]: I1211 10:39:32.455961 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc9qn" event={"ID":"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4","Type":"ContainerStarted","Data":"a292df12190a62073c2372ceb6f52804000afd75f5a1a64e492f7c67a12d73d6"} Dec 11 10:39:33 crc kubenswrapper[5016]: I1211 10:39:33.565129 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 11 10:39:33 crc kubenswrapper[5016]: I1211 10:39:33.565222 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e5d26faf59736e40506d10b780ae9155fa8c23720c9648b78647cab55e0b98cd"} Dec 11 10:39:33 crc kubenswrapper[5016]: I1211 10:39:33.582196 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/1.log" Dec 11 10:39:33 crc kubenswrapper[5016]: I1211 10:39:33.586516 5016 generic.go:334] "Generic (PLEG): container finished" podID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerID="ed84fbbb78234326717842bc971216184e3ae7a96479896d7aebbe056f8245a7" exitCode=1 Dec 11 10:39:33 crc kubenswrapper[5016]: I1211 10:39:33.586808 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" event={"ID":"d8539d49-e453-4b15-a4d6-0e0583b93390","Type":"ContainerDied","Data":"ed84fbbb78234326717842bc971216184e3ae7a96479896d7aebbe056f8245a7"} Dec 11 10:39:33 crc kubenswrapper[5016]: I1211 10:39:33.586871 5016 scope.go:117] "RemoveContainer" containerID="c906b2e6c1fdf3d3c54425c0ce75c9e2602c7ad43116b532768fcb1075c0b67d" Dec 11 10:39:33 crc kubenswrapper[5016]: I1211 10:39:33.587081 5016 scope.go:117] "RemoveContainer" containerID="ed84fbbb78234326717842bc971216184e3ae7a96479896d7aebbe056f8245a7" Dec 11 10:39:33 crc kubenswrapper[5016]: E1211 10:39:33.587279 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" Dec 11 10:39:33 crc kubenswrapper[5016]: I1211 10:39:33.625760 5016 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"4fbf0bcc7416d4977088f43e776a058d9ebf9db657f5c7b3d14f4efeb7c4c4ac"} Dec 11 10:39:33 crc kubenswrapper[5016]: I1211 10:39:33.625799 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0c0573c8e63f1d7894d3a43609a5c4a5b22ddbd296d7a4afc41369f5aa320038"} Dec 11 10:39:34 crc kubenswrapper[5016]: I1211 10:39:34.633533 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"ee069b0bb6017c023d90ecc761de138cb957f3f68ccf40405cfcc808e39aa728"} Dec 11 10:39:34 crc kubenswrapper[5016]: I1211 10:39:34.633833 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f2e07cf4ec180afeaed7f06f0c2707b7bde39edeaed0a84806731de791c40923"} Dec 11 10:39:34 crc kubenswrapper[5016]: I1211 10:39:34.634098 5016 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="071ea1a0-65ea-49d7-a4b1-0f8a312c0112" Dec 11 10:39:34 crc kubenswrapper[5016]: I1211 10:39:34.634112 5016 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="071ea1a0-65ea-49d7-a4b1-0f8a312c0112" Dec 11 10:39:34 crc kubenswrapper[5016]: I1211 10:39:34.634479 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:39:34 crc kubenswrapper[5016]: I1211 10:39:34.635582 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/1.log" Dec 11 10:39:34 crc kubenswrapper[5016]: I1211 10:39:34.636028 5016 scope.go:117] "RemoveContainer" containerID="ed84fbbb78234326717842bc971216184e3ae7a96479896d7aebbe056f8245a7" Dec 11 10:39:34 crc kubenswrapper[5016]: E1211 10:39:34.636213 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" Dec 11 10:39:34 crc kubenswrapper[5016]: I1211 10:39:34.637281 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tp5lv" event={"ID":"7242e8c3-6ed6-4613-8fc9-1339be494e56","Type":"ContainerStarted","Data":"49f36ac691fccb6c64dcb740e006d223ac23b60a5d4657c2f6fec3f0ab151b49"} Dec 11 10:39:35 crc kubenswrapper[5016]: I1211 10:39:35.985128 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:39:35 crc kubenswrapper[5016]: I1211 10:39:35.985231 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.161996 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.162097 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.377729 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.377837 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.417203 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.417741 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.418801 5016 scope.go:117] "RemoveContainer" containerID="ed84fbbb78234326717842bc971216184e3ae7a96479896d7aebbe056f8245a7" Dec 11 10:39:36 crc kubenswrapper[5016]: E1211 10:39:36.419132 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.500720 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.501772 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.553804 5016 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]log ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]etcd ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/openshift.io-startkubeinformers ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/start-apiserver-admission-initializer ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/openshift.io-api-request-count-filter ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/generic-apiserver-start-informers ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/priority-and-fairness-config-consumer ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/priority-and-fairness-filter ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/storage-object-count-tracker-hook ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/start-apiextensions-informers ok Dec 11 10:39:36 crc 
kubenswrapper[5016]: [-]poststarthook/start-apiextensions-controllers failed: reason withheld Dec 11 10:39:36 crc kubenswrapper[5016]: [-]poststarthook/crd-informer-synced failed: reason withheld Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/start-system-namespaces-controller ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/start-cluster-authentication-info-controller ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/start-legacy-token-tracking-controller ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/start-service-ip-repair-controllers ok Dec 11 10:39:36 crc kubenswrapper[5016]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Dec 11 10:39:36 crc kubenswrapper[5016]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/priority-and-fairness-config-producer ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/bootstrap-controller ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/start-kube-aggregator-informers ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/apiservice-status-local-available-controller ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/apiservice-status-remote-available-controller ok Dec 11 10:39:36 crc kubenswrapper[5016]: [-]poststarthook/apiservice-registration-controller failed: reason withheld Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/apiservice-wait-for-first-sync ok Dec 11 10:39:36 crc kubenswrapper[5016]: [-]poststarthook/apiservice-discovery-controller failed: reason withheld Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/kube-apiserver-autoregistration ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]autoregister-completion ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/apiservice-openapi-controller ok Dec 11 10:39:36 crc kubenswrapper[5016]: [+]poststarthook/apiservice-openapiv3-controller ok Dec 11 10:39:36 crc kubenswrapper[5016]: livez check failed Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.553881 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.710088 5016 generic.go:334] "Generic (PLEG): container finished" podID="89fda315-d1f2-484a-aa91-ec75f0b0227e" containerID="1aa80eb5a334fb04c2d4caad344fa5bc35a2f4815cd89e472726de20a9f109dc" exitCode=0 Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.710170 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcjf" event={"ID":"89fda315-d1f2-484a-aa91-ec75f0b0227e","Type":"ContainerDied","Data":"1aa80eb5a334fb04c2d4caad344fa5bc35a2f4815cd89e472726de20a9f109dc"} Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.715801 5016 generic.go:334] "Generic (PLEG): container finished" podID="7242e8c3-6ed6-4613-8fc9-1339be494e56" containerID="49f36ac691fccb6c64dcb740e006d223ac23b60a5d4657c2f6fec3f0ab151b49" exitCode=0 Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 
10:39:36.716102 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tp5lv" event={"ID":"7242e8c3-6ed6-4613-8fc9-1339be494e56","Type":"ContainerDied","Data":"49f36ac691fccb6c64dcb740e006d223ac23b60a5d4657c2f6fec3f0ab151b49"} Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.717621 5016 scope.go:117] "RemoveContainer" containerID="ed84fbbb78234326717842bc971216184e3ae7a96479896d7aebbe056f8245a7" Dec 11 10:39:36 crc kubenswrapper[5016]: E1211 10:39:36.717879 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.741424 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:39:36 crc kubenswrapper[5016]: I1211 10:39:36.854091 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-r5rgf" Dec 11 10:39:37 crc kubenswrapper[5016]: I1211 10:39:37.638511 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-fc9qn" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" containerName="registry-server" probeResult="failure" output=< Dec 11 10:39:37 crc kubenswrapper[5016]: timeout: failed to connect service ":50051" within 1s Dec 11 10:39:37 crc kubenswrapper[5016]: > Dec 11 10:39:37 crc kubenswrapper[5016]: I1211 10:39:37.645764 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-c6sdb" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" containerName="registry-server" probeResult="failure" output=< Dec 11 10:39:37 crc kubenswrapper[5016]: timeout: failed to connect service ":50051" within 1s Dec 11 10:39:37 crc kubenswrapper[5016]: > Dec 11 10:39:38 crc kubenswrapper[5016]: I1211 10:39:38.087466 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:39:38 crc kubenswrapper[5016]: I1211 10:39:38.087963 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:39:38 crc kubenswrapper[5016]: I1211 10:39:38.248786 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:39:38 crc kubenswrapper[5016]: I1211 10:39:38.728179 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tp5lv" event={"ID":"7242e8c3-6ed6-4613-8fc9-1339be494e56","Type":"ContainerStarted","Data":"ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492"} Dec 11 10:39:38 crc kubenswrapper[5016]: I1211 10:39:38.829480 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:39:38 crc kubenswrapper[5016]: I1211 10:39:38.917749 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:39:38 crc kubenswrapper[5016]: I1211 10:39:38.917826 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:39:39 crc kubenswrapper[5016]: I1211 10:39:39.047241 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:39:39 crc kubenswrapper[5016]: I1211 10:39:39.783597 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:39:40 crc kubenswrapper[5016]: I1211 10:39:40.318637 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:39:40 crc kubenswrapper[5016]: I1211 10:39:40.318772 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:39:40 crc kubenswrapper[5016]: I1211 10:39:40.911726 5016 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 11 10:39:41 crc kubenswrapper[5016]: I1211 10:39:41.111292 5016 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="890177ec-3a3d-40e6-8769-1cbdcf81d16a" Dec 11 10:39:41 crc kubenswrapper[5016]: I1211 10:39:41.378972 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rx8bv" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" containerName="registry-server" probeResult="failure" output=< Dec 11 10:39:41 crc kubenswrapper[5016]: timeout: failed to connect service ":50051" within 1s Dec 11 10:39:41 crc kubenswrapper[5016]: > Dec 11 10:39:41 crc kubenswrapper[5016]: I1211 10:39:41.748872 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcjf" event={"ID":"89fda315-d1f2-484a-aa91-ec75f0b0227e","Type":"ContainerStarted","Data":"96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261"} Dec 11 10:39:41 crc kubenswrapper[5016]: I1211 10:39:41.749090 5016 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="071ea1a0-65ea-49d7-a4b1-0f8a312c0112" Dec 11 10:39:41 crc kubenswrapper[5016]: I1211 10:39:41.749260 5016 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="071ea1a0-65ea-49d7-a4b1-0f8a312c0112" Dec 11 10:39:41 crc kubenswrapper[5016]: I1211 10:39:41.752964 5016 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="890177ec-3a3d-40e6-8769-1cbdcf81d16a" Dec 11 10:39:43 crc kubenswrapper[5016]: I1211 10:39:43.338686 5016 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Dec 11 10:39:45 crc kubenswrapper[5016]: I1211 10:39:45.813711 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:39:45 crc kubenswrapper[5016]: I1211 10:39:45.814173 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:39:45 crc kubenswrapper[5016]: I1211 10:39:45.868401 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:39:46 crc kubenswrapper[5016]: I1211 
10:39:46.044239 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:39:46 crc kubenswrapper[5016]: I1211 10:39:46.119433 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:39:46 crc kubenswrapper[5016]: I1211 10:39:46.180382 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:39:46 crc kubenswrapper[5016]: I1211 10:39:46.227817 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fc9qn" Dec 11 10:39:46 crc kubenswrapper[5016]: I1211 10:39:46.867389 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:39:48 crc kubenswrapper[5016]: I1211 10:39:48.474208 5016 scope.go:117] "RemoveContainer" containerID="ed84fbbb78234326717842bc971216184e3ae7a96479896d7aebbe056f8245a7" Dec 11 10:39:50 crc kubenswrapper[5016]: I1211 10:39:50.323040 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:39:50 crc kubenswrapper[5016]: I1211 10:39:50.323376 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:39:50 crc kubenswrapper[5016]: I1211 10:39:50.375911 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:39:50 crc kubenswrapper[5016]: I1211 10:39:50.393597 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:39:50 crc kubenswrapper[5016]: I1211 10:39:50.422817 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:39:50 crc kubenswrapper[5016]: I1211 10:39:50.854209 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:39:52 crc kubenswrapper[5016]: I1211 10:39:52.823797 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-ljcrh_af75da0d-e4cb-4961-b57a-ea888c20af89/control-plane-machine-set-operator/0.log" Dec 11 10:39:52 crc kubenswrapper[5016]: I1211 10:39:52.824580 5016 generic.go:334] "Generic (PLEG): container finished" podID="af75da0d-e4cb-4961-b57a-ea888c20af89" containerID="9ce4148ece840ebe41ad58c912bfffe8834d0fbc87aad978fef1853fa1e8b6a2" exitCode=1 Dec 11 10:39:52 crc kubenswrapper[5016]: I1211 10:39:52.824646 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" event={"ID":"af75da0d-e4cb-4961-b57a-ea888c20af89","Type":"ContainerDied","Data":"9ce4148ece840ebe41ad58c912bfffe8834d0fbc87aad978fef1853fa1e8b6a2"} Dec 11 10:39:52 crc kubenswrapper[5016]: I1211 10:39:52.825305 5016 scope.go:117] "RemoveContainer" containerID="9ce4148ece840ebe41ad58c912bfffe8834d0fbc87aad978fef1853fa1e8b6a2" Dec 11 10:39:52 crc kubenswrapper[5016]: I1211 10:39:52.826916 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/2.log" Dec 11 10:39:52 crc 
kubenswrapper[5016]: I1211 10:39:52.827661 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/1.log"
Dec 11 10:39:52 crc kubenswrapper[5016]: I1211 10:39:52.827727 5016 generic.go:334] "Generic (PLEG): container finished" podID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerID="685a4fb67509bd8cc2a9ca6f82ac9426ee9bb096fd8af07a79562f3e2cbf0a3d" exitCode=1
Dec 11 10:39:52 crc kubenswrapper[5016]: I1211 10:39:52.827810 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" event={"ID":"d8539d49-e453-4b15-a4d6-0e0583b93390","Type":"ContainerDied","Data":"685a4fb67509bd8cc2a9ca6f82ac9426ee9bb096fd8af07a79562f3e2cbf0a3d"}
Dec 11 10:39:52 crc kubenswrapper[5016]: I1211 10:39:52.827869 5016 scope.go:117] "RemoveContainer" containerID="ed84fbbb78234326717842bc971216184e3ae7a96479896d7aebbe056f8245a7"
Dec 11 10:39:52 crc kubenswrapper[5016]: I1211 10:39:52.829503 5016 scope.go:117] "RemoveContainer" containerID="685a4fb67509bd8cc2a9ca6f82ac9426ee9bb096fd8af07a79562f3e2cbf0a3d"
Dec 11 10:39:52 crc kubenswrapper[5016]: E1211 10:39:52.830063 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390"
Dec 11 10:39:52 crc kubenswrapper[5016]: I1211 10:39:52.834898 5016 generic.go:334] "Generic (PLEG): container finished" podID="b1573c39-dbf1-475d-90d8-2bc8d89f18c6" containerID="e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b" exitCode=0
Dec 11 10:39:52 crc kubenswrapper[5016]: I1211 10:39:52.834998 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" event={"ID":"b1573c39-dbf1-475d-90d8-2bc8d89f18c6","Type":"ContainerDied","Data":"e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b"}
Dec 11 10:39:52 crc kubenswrapper[5016]: I1211 10:39:52.835617 5016 scope.go:117] "RemoveContainer" containerID="e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b"
Dec 11 10:39:53 crc kubenswrapper[5016]: I1211 10:39:53.843135 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" event={"ID":"b1573c39-dbf1-475d-90d8-2bc8d89f18c6","Type":"ContainerStarted","Data":"09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3"}
Dec 11 10:39:53 crc kubenswrapper[5016]: I1211 10:39:53.844167 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj"
Dec 11 10:39:53 crc kubenswrapper[5016]: I1211 10:39:53.846604 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-ljcrh_af75da0d-e4cb-4961-b57a-ea888c20af89/control-plane-machine-set-operator/0.log"
Dec 11 10:39:53 crc kubenswrapper[5016]: I1211 10:39:53.846672 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" event={"ID":"af75da0d-e4cb-4961-b57a-ea888c20af89","Type":"ContainerStarted","Data":"0b4f205a4279cc0525d5c597baf6cce03ca206d85d0ed2e91ac43a490cf5d5e8"}
Dec 11 10:39:53 crc kubenswrapper[5016]: I1211 10:39:53.848130 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj"
Dec 11 10:39:53 crc kubenswrapper[5016]: I1211 10:39:53.849382 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/2.log"
Dec 11 10:39:56 crc kubenswrapper[5016]: I1211 10:39:56.417176 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk"
Dec 11 10:39:56 crc kubenswrapper[5016]: I1211 10:39:56.417498 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk"
Dec 11 10:39:56 crc kubenswrapper[5016]: I1211 10:39:56.418196 5016 scope.go:117] "RemoveContainer" containerID="685a4fb67509bd8cc2a9ca6f82ac9426ee9bb096fd8af07a79562f3e2cbf0a3d"
Dec 11 10:39:56 crc kubenswrapper[5016]: E1211 10:39:56.418459 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390"
Dec 11 10:40:07 crc kubenswrapper[5016]: I1211 10:40:07.475016 5016 scope.go:117] "RemoveContainer" containerID="685a4fb67509bd8cc2a9ca6f82ac9426ee9bb096fd8af07a79562f3e2cbf0a3d"
Dec 11 10:40:07 crc kubenswrapper[5016]: E1211 10:40:07.475843 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390"
Dec 11 10:40:08 crc kubenswrapper[5016]: I1211 10:40:08.357605 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Dec 11 10:40:10 crc kubenswrapper[5016]: I1211 10:40:10.078463 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Dec 11 10:40:11 crc kubenswrapper[5016]: I1211 10:40:11.398515 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Dec 11 10:40:11 crc kubenswrapper[5016]: I1211 10:40:11.726910 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Dec 11 10:40:11 crc kubenswrapper[5016]: I1211 10:40:11.905552 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Dec 11 10:40:12 crc kubenswrapper[5016]: I1211 10:40:12.630864 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Dec 11 10:40:13 crc kubenswrapper[5016]: I1211 10:40:13.238990 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Dec 11 10:40:13 crc kubenswrapper[5016]: I1211 10:40:13.908546 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Dec 11 10:40:14 crc kubenswrapper[5016]: I1211 10:40:14.528922 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Dec 11 10:40:15 crc kubenswrapper[5016]: I1211 10:40:15.065557 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Dec 11 10:40:15 crc kubenswrapper[5016]: I1211 10:40:15.082712 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Dec 11 10:40:16 crc kubenswrapper[5016]: I1211 10:40:16.231853 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Dec 11 10:40:16 crc kubenswrapper[5016]: I1211 10:40:16.798605 5016 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Dec 11 10:40:16 crc kubenswrapper[5016]: I1211 10:40:16.943256 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Dec 11 10:40:17 crc kubenswrapper[5016]: I1211 10:40:17.087739 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Dec 11 10:40:17 crc kubenswrapper[5016]: I1211 10:40:17.194715 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Dec 11 10:40:17 crc kubenswrapper[5016]: I1211 10:40:17.219460 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Dec 11 10:40:18 crc kubenswrapper[5016]: I1211 10:40:18.277299 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Dec 11 10:40:18 crc kubenswrapper[5016]: I1211 10:40:18.430605 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Dec 11 10:40:18 crc kubenswrapper[5016]: I1211 10:40:18.835222 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Dec 11 10:40:18 crc kubenswrapper[5016]: I1211 10:40:18.932671 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Dec 11 10:40:19 crc kubenswrapper[5016]: I1211 10:40:19.315208 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Dec 11 10:40:19 crc kubenswrapper[5016]: I1211 10:40:19.774179 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Dec 11 10:40:19 crc kubenswrapper[5016]: I1211 10:40:19.859676 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Dec 11 10:40:20 crc kubenswrapper[5016]: I1211 10:40:20.150427 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Dec 11 10:40:20 crc kubenswrapper[5016]: I1211 10:40:20.817191 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Dec 11 10:40:20 crc kubenswrapper[5016]: I1211 10:40:20.965116 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Dec 11 10:40:21 crc kubenswrapper[5016]: I1211 10:40:21.326192 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Dec 11 10:40:21 crc kubenswrapper[5016]: I1211 10:40:21.393737 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Dec 11 10:40:21 crc kubenswrapper[5016]: I1211 10:40:21.582019 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Dec 11 10:40:22 crc kubenswrapper[5016]: I1211 10:40:22.285369 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Dec 11 10:40:22 crc kubenswrapper[5016]: I1211 10:40:22.365623 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Dec 11 10:40:22 crc kubenswrapper[5016]: I1211 10:40:22.476132 5016 scope.go:117] "RemoveContainer" containerID="685a4fb67509bd8cc2a9ca6f82ac9426ee9bb096fd8af07a79562f3e2cbf0a3d"
Dec 11 10:40:22 crc kubenswrapper[5016]: I1211 10:40:22.657264 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Dec 11 10:40:22 crc kubenswrapper[5016]: I1211 10:40:22.759383 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Dec 11 10:40:23 crc kubenswrapper[5016]: I1211 10:40:23.044065 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/2.log"
Dec 11 10:40:23 crc kubenswrapper[5016]: I1211 10:40:23.044119 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" event={"ID":"d8539d49-e453-4b15-a4d6-0e0583b93390","Type":"ContainerStarted","Data":"f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65"}
Dec 11 10:40:23 crc kubenswrapper[5016]: I1211 10:40:23.044507 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk"
Dec 11 10:40:23 crc kubenswrapper[5016]: I1211 10:40:23.046204 5016 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-kp5bk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body=
Dec 11 10:40:23 crc kubenswrapper[5016]: I1211 10:40:23.046256 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused"
Dec 11 10:40:23 crc kubenswrapper[5016]: I1211 10:40:23.287280 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Dec 11 10:40:23 crc kubenswrapper[5016]: I1211 10:40:23.408687 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Dec 11 10:40:23 crc kubenswrapper[5016]: I1211 10:40:23.582032 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Dec 11 10:40:23 crc kubenswrapper[5016]: I1211 10:40:23.801490 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Dec 11 10:40:23 crc kubenswrapper[5016]: I1211 10:40:23.914328 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.036928 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.051099 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/3.log"
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.051631 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/2.log"
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.051713 5016 generic.go:334] "Generic (PLEG): container finished" podID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerID="f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65" exitCode=1
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.051750 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" event={"ID":"d8539d49-e453-4b15-a4d6-0e0583b93390","Type":"ContainerDied","Data":"f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65"}
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.051813 5016 scope.go:117] "RemoveContainer" containerID="685a4fb67509bd8cc2a9ca6f82ac9426ee9bb096fd8af07a79562f3e2cbf0a3d"
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.052719 5016 scope.go:117] "RemoveContainer" containerID="f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65"
Dec 11 10:40:24 crc kubenswrapper[5016]: E1211 10:40:24.053249 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390"
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.111685 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.150978 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.154931 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.322077 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.781038 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Dec 11 10:40:24 crc kubenswrapper[5016]: I1211 10:40:24.920873 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.060858 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/3.log"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.061410 5016 scope.go:117] "RemoveContainer" containerID="f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65"
Dec 11 10:40:25 crc kubenswrapper[5016]: E1211 10:40:25.061617 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.121985 5016 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.122809 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rzcjf" podStartSLOduration=58.562710034 podStartE2EDuration="4m26.122788779s" podCreationTimestamp="2025-12-11 10:35:59 +0000 UTC" firstStartedPulling="2025-12-11 10:36:10.732131987 +0000 UTC m=+87.550691576" lastFinishedPulling="2025-12-11 10:39:38.292210752 +0000 UTC m=+295.110770321" observedRunningTime="2025-12-11 10:39:42.77752956 +0000 UTC m=+299.596089129" watchObservedRunningTime="2025-12-11 10:40:25.122788779 +0000 UTC m=+341.941348358"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.123460 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=161.123454315 podStartE2EDuration="2m41.123454315s" podCreationTimestamp="2025-12-11 10:37:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:39:40.92665503 +0000 UTC m=+297.745214649" watchObservedRunningTime="2025-12-11 10:40:25.123454315 +0000 UTC m=+341.942013894"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.124254 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hqmxw" podStartSLOduration=58.510862464 podStartE2EDuration="4m28.124246635s" podCreationTimestamp="2025-12-11 10:35:57 +0000 UTC" firstStartedPulling="2025-12-11 10:36:01.59433686 +0000 UTC m=+78.412896459" lastFinishedPulling="2025-12-11 10:39:31.207721051 +0000 UTC m=+288.026280630" observedRunningTime="2025-12-11 10:39:41.517674983 +0000 UTC m=+298.336234582" watchObservedRunningTime="2025-12-11 10:40:25.124246635 +0000 UTC m=+341.942806214"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.124961 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-c6sdb" podStartSLOduration=58.787737399 podStartE2EDuration="4m30.124932472s" podCreationTimestamp="2025-12-11 10:35:55 +0000 UTC" firstStartedPulling="2025-12-11 10:35:59.344498277 +0000 UTC m=+76.163057856" lastFinishedPulling="2025-12-11 10:39:30.68169335 +0000 UTC m=+287.500252929" observedRunningTime="2025-12-11 10:39:41.265994332 +0000 UTC m=+298.084553921" watchObservedRunningTime="2025-12-11 10:40:25.124932472 +0000 UTC m=+341.943492051"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.125469 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tp5lv" podStartSLOduration=52.246420798 podStartE2EDuration="4m30.125464335s" podCreationTimestamp="2025-12-11 10:35:55 +0000 UTC" firstStartedPulling="2025-12-11 10:35:59.375135306 +0000 UTC m=+76.193694885" lastFinishedPulling="2025-12-11 10:39:37.254178843 +0000 UTC m=+294.072738422" observedRunningTime="2025-12-11 10:39:41.464717989 +0000 UTC m=+298.283277598" watchObservedRunningTime="2025-12-11 10:40:25.125464335 +0000 UTC m=+341.944023914"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.126159 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-r5rgf" podStartSLOduration=58.087842414 podStartE2EDuration="4m30.126153552s" podCreationTimestamp="2025-12-11 10:35:55 +0000 UTC" firstStartedPulling="2025-12-11 10:35:59.396142552 +0000 UTC m=+76.214702131" lastFinishedPulling="2025-12-11 10:39:31.43445369 +0000 UTC m=+288.253013269" observedRunningTime="2025-12-11 10:39:41.427868353 +0000 UTC m=+298.246427942" watchObservedRunningTime="2025-12-11 10:40:25.126153552 +0000 UTC m=+341.944713131"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.126626 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vb25j" podStartSLOduration=66.481331346 podStartE2EDuration="4m28.126621024s" podCreationTimestamp="2025-12-11 10:35:57 +0000 UTC" firstStartedPulling="2025-12-11 10:36:09.715781017 +0000 UTC m=+86.534340596" lastFinishedPulling="2025-12-11 10:39:31.361070705 +0000 UTC m=+288.179630274" observedRunningTime="2025-12-11 10:39:41.392849965 +0000 UTC m=+298.211409554" watchObservedRunningTime="2025-12-11 10:40:25.126621024 +0000 UTC m=+341.945180603"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.126896 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fc9qn" podStartSLOduration=58.111792851 podStartE2EDuration="4m30.12689019s" podCreationTimestamp="2025-12-11 10:35:55 +0000 UTC" firstStartedPulling="2025-12-11 10:35:59.351932064 +0000 UTC m=+76.170491643" lastFinishedPulling="2025-12-11 10:39:31.367029403 +0000 UTC m=+288.185588982" observedRunningTime="2025-12-11 10:39:41.372792162 +0000 UTC m=+298.191351741" watchObservedRunningTime="2025-12-11 10:40:25.12689019 +0000 UTC m=+341.945449779"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.127025 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rx8bv" podStartSLOduration=65.477690916 podStartE2EDuration="4m27.127019133s" podCreationTimestamp="2025-12-11 10:35:58 +0000 UTC" firstStartedPulling="2025-12-11 10:36:09.717757347 +0000 UTC m=+86.536316926" lastFinishedPulling="2025-12-11 10:39:31.367085564 +0000 UTC m=+288.185645143" observedRunningTime="2025-12-11 10:39:41.498260958 +0000 UTC m=+298.316820537" watchObservedRunningTime="2025-12-11 10:40:25.127019133 +0000 UTC m=+341.945578732"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.127621 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-558db77b4-4k8l5"]
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.127674 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.133677 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.173731 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=45.173704826 podStartE2EDuration="45.173704826s" podCreationTimestamp="2025-12-11 10:39:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:40:25.15241215 +0000 UTC m=+341.970971729" watchObservedRunningTime="2025-12-11 10:40:25.173704826 +0000 UTC m=+341.992264415"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.382858 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.483800 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" path="/var/lib/kubelet/pods/a91554fe-759f-4f9a-9d88-7b4d8650a08b/volumes"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.569035 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.606750 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.630352 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Dec 11 10:40:25 crc kubenswrapper[5016]: I1211 10:40:25.864622 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Dec 11 10:40:26 crc kubenswrapper[5016]: I1211 10:40:26.305134 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Dec 11 10:40:26 crc kubenswrapper[5016]: I1211 10:40:26.336431 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Dec 11 10:40:26 crc kubenswrapper[5016]: I1211 10:40:26.417644 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk"
Dec 11 10:40:26 crc kubenswrapper[5016]: I1211 10:40:26.418321 5016 scope.go:117] "RemoveContainer" containerID="f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65"
Dec 11 10:40:26 crc kubenswrapper[5016]: E1211 10:40:26.418512 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390"
Dec 11 10:40:26 crc kubenswrapper[5016]: I1211 10:40:26.496020 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:40:26 crc kubenswrapper[5016]: I1211 10:40:26.581637 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Dec 11 10:40:26 crc kubenswrapper[5016]: I1211 10:40:26.716819 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Dec 11 10:40:26 crc kubenswrapper[5016]: I1211 10:40:26.901508 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Dec 11 10:40:27 crc kubenswrapper[5016]: I1211 10:40:27.074387 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Dec 11 10:40:27 crc kubenswrapper[5016]: I1211 10:40:27.076900 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 11 10:40:27 crc kubenswrapper[5016]: I1211 10:40:27.310098 5016 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Dec 11 10:40:27 crc kubenswrapper[5016]: I1211 10:40:27.396996 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Dec 11 10:40:27 crc kubenswrapper[5016]: I1211 10:40:27.486285 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Dec 11 10:40:27 crc kubenswrapper[5016]: I1211 10:40:27.592287 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Dec 11 10:40:27 crc kubenswrapper[5016]: I1211 10:40:27.597040 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Dec 11 10:40:27 crc kubenswrapper[5016]: I1211 10:40:27.782194 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Dec 11 10:40:28 crc kubenswrapper[5016]: I1211 10:40:28.081909 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-machine-approver_machine-approver-56656f9798-7tv87_73e450c1-7bc9-4502-b3c5-e7845ba29342/machine-approver-controller/0.log"
Dec 11 10:40:28 crc kubenswrapper[5016]: I1211 10:40:28.082436 5016 generic.go:334] "Generic (PLEG): container finished" podID="73e450c1-7bc9-4502-b3c5-e7845ba29342" containerID="8ac3dedf3ebc4d1f0f5388fd29026351106da3ba4c411bad1b3b88a9697a2c07" exitCode=255
Dec 11 10:40:28 crc kubenswrapper[5016]: I1211 10:40:28.082551 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" event={"ID":"73e450c1-7bc9-4502-b3c5-e7845ba29342","Type":"ContainerDied","Data":"8ac3dedf3ebc4d1f0f5388fd29026351106da3ba4c411bad1b3b88a9697a2c07"}
Dec 11 10:40:28 crc kubenswrapper[5016]: I1211 10:40:28.083328 5016 scope.go:117] "RemoveContainer" containerID="8ac3dedf3ebc4d1f0f5388fd29026351106da3ba4c411bad1b3b88a9697a2c07"
Dec 11 10:40:28 crc kubenswrapper[5016]: I1211 10:40:28.243211 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Dec 11 10:40:28 crc kubenswrapper[5016]: I1211 10:40:28.365231 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Dec 11 10:40:28 crc kubenswrapper[5016]: I1211 10:40:28.617675 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Dec 11 10:40:28 crc kubenswrapper[5016]: I1211 10:40:28.692318 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Dec 11 10:40:28 crc kubenswrapper[5016]: I1211 10:40:28.786442 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Dec 11 10:40:28 crc kubenswrapper[5016]: I1211 10:40:28.951105 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Dec 11 10:40:29 crc kubenswrapper[5016]: I1211 10:40:29.095761 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-machine-approver_machine-approver-56656f9798-7tv87_73e450c1-7bc9-4502-b3c5-e7845ba29342/machine-approver-controller/0.log"
Dec 11 10:40:29 crc kubenswrapper[5016]: I1211 10:40:29.096907 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7tv87" event={"ID":"73e450c1-7bc9-4502-b3c5-e7845ba29342","Type":"ContainerStarted","Data":"28d1b72bb730532e2cf9f0e3bbd77e4e07cf8f9a32cac20567c1fe446080fb88"}
Dec 11 10:40:29 crc kubenswrapper[5016]: I1211 10:40:29.444932 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Dec 11 10:40:29 crc kubenswrapper[5016]: I1211 10:40:29.724482 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Dec 11 10:40:29 crc kubenswrapper[5016]: I1211 10:40:29.775756 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Dec 11 10:40:29 crc kubenswrapper[5016]: I1211 10:40:29.945024 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Dec 11 10:40:30 crc kubenswrapper[5016]: I1211 10:40:30.046761 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Dec 11 10:40:30 crc kubenswrapper[5016]: I1211 10:40:30.138542 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Dec 11 10:40:30 crc kubenswrapper[5016]: I1211 10:40:30.245414 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Dec 11 10:40:30 crc kubenswrapper[5016]: I1211 10:40:30.326168 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Dec 11 10:40:30 crc kubenswrapper[5016]: I1211 10:40:30.611917 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Dec 11 10:40:30 crc kubenswrapper[5016]: I1211 10:40:30.711985 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Dec 11 10:40:30 crc kubenswrapper[5016]: I1211 10:40:30.727929 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Dec 11 10:40:30 crc kubenswrapper[5016]: I1211 10:40:30.823855 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Dec 11 10:40:30 crc kubenswrapper[5016]: I1211 10:40:30.975492 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Dec 11 10:40:31 crc kubenswrapper[5016]: I1211 10:40:31.102229 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Dec 11 10:40:31 crc kubenswrapper[5016]: I1211 10:40:31.478469 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Dec 11 10:40:31 crc kubenswrapper[5016]: I1211 10:40:31.762721 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Dec 11 10:40:31 crc kubenswrapper[5016]: I1211 10:40:31.773184 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Dec 11 10:40:31 crc kubenswrapper[5016]: I1211 10:40:31.875317 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Dec 11 10:40:31 crc kubenswrapper[5016]: I1211 10:40:31.965293 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Dec 11 10:40:31 crc kubenswrapper[5016]: I1211 10:40:31.967718 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Dec 11 10:40:31 crc kubenswrapper[5016]: I1211 10:40:31.997539 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Dec 11 10:40:32 crc kubenswrapper[5016]: I1211 10:40:32.066623 5016 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Dec 11 10:40:32 crc kubenswrapper[5016]: I1211 10:40:32.519779 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Dec 11 10:40:32 crc kubenswrapper[5016]: I1211 10:40:32.807603 5016 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Dec 11 10:40:33 crc kubenswrapper[5016]: I1211 10:40:33.076834 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Dec 11 10:40:33 crc kubenswrapper[5016]: I1211 10:40:33.082177 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Dec 11 10:40:33 crc kubenswrapper[5016]: I1211 10:40:33.250419 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Dec 11 10:40:33 crc kubenswrapper[5016]: I1211 10:40:33.523563 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Dec 11 10:40:33 crc kubenswrapper[5016]: I1211 10:40:33.681380 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Dec 11 10:40:33 crc kubenswrapper[5016]: I1211 10:40:33.755155 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Dec 11 10:40:34 crc kubenswrapper[5016]: I1211 10:40:34.314859 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Dec 11 10:40:34 crc kubenswrapper[5016]: I1211 10:40:34.399730 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Dec 11 10:40:34 crc kubenswrapper[5016]: I1211 10:40:34.468575 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Dec 11 10:40:34 crc kubenswrapper[5016]: I1211 10:40:34.526013 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Dec 11 10:40:34 crc kubenswrapper[5016]: I1211 10:40:34.633661 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Dec 11 10:40:34 crc kubenswrapper[5016]: I1211 10:40:34.766334 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Dec 11 10:40:34 crc kubenswrapper[5016]: I1211 10:40:34.898139 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Dec 11 10:40:35 crc kubenswrapper[5016]: I1211 10:40:35.368988 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Dec 11 10:40:35 crc kubenswrapper[5016]: I1211 10:40:35.453567 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Dec 11 10:40:35 crc kubenswrapper[5016]: I1211 10:40:35.461809 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Dec 11 10:40:35 crc kubenswrapper[5016]: I1211 10:40:35.748047 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Dec 11 10:40:35 crc kubenswrapper[5016]: I1211 10:40:35.936029 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Dec 11 10:40:36 crc kubenswrapper[5016]: I1211 10:40:36.141300 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Dec 11 10:40:36 crc kubenswrapper[5016]: I1211 10:40:36.158513 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Dec 11 10:40:36 crc kubenswrapper[5016]: I1211 10:40:36.267227 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Dec 11 10:40:36 crc kubenswrapper[5016]: I1211 10:40:36.278041 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Dec 11 10:40:36 crc kubenswrapper[5016]: I1211 10:40:36.613977 5016 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Dec 11 10:40:36 crc kubenswrapper[5016]: I1211 10:40:36.614335 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://fb8820d3d9ee638e20951da811b9d705c1a02a15688cacce52745491124d8557" gracePeriod=5
Dec 11 10:40:36 crc kubenswrapper[5016]: I1211 10:40:36.651924 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Dec 11 10:40:36 crc kubenswrapper[5016]: I1211 10:40:36.761466 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Dec 11 10:40:37 crc kubenswrapper[5016]: I1211 10:40:37.014482 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Dec 11 10:40:37 crc kubenswrapper[5016]: I1211 10:40:37.030177 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Dec 11 10:40:37 crc kubenswrapper[5016]: I1211 10:40:37.057311 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Dec 11 10:40:37 crc kubenswrapper[5016]: I1211 10:40:37.099095 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Dec 11 10:40:37 crc kubenswrapper[5016]: I1211 10:40:37.234541 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Dec 11 10:40:37 crc kubenswrapper[5016]: I1211 10:40:37.575103 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Dec 11 10:40:37 crc kubenswrapper[5016]: I1211 10:40:37.809919 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Dec 11 10:40:37 crc kubenswrapper[5016]: I1211 10:40:37.890577 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Dec 11 10:40:38 crc kubenswrapper[5016]: I1211 10:40:38.060915 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Dec 11 10:40:38 crc kubenswrapper[5016]: I1211 10:40:38.439874 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Dec 11 10:40:38 crc kubenswrapper[5016]: I1211 10:40:38.469389 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Dec 11 10:40:38 crc kubenswrapper[5016]: I1211 10:40:38.533498 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Dec 11 10:40:38 crc kubenswrapper[5016]: I1211 10:40:38.645418 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Dec 11 10:40:38 crc kubenswrapper[5016]: I1211 10:40:38.661108 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Dec 11 10:40:38 crc kubenswrapper[5016]: I1211 10:40:38.684341 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Dec 11 10:40:38 crc kubenswrapper[5016]: I1211 10:40:38.713554 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Dec 11 10:40:38 crc kubenswrapper[5016]: I1211 10:40:38.717329 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Dec 11 10:40:38 crc kubenswrapper[5016]: I1211 10:40:38.782468 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Dec 11 10:40:38 crc kubenswrapper[5016]: I1211 10:40:38.908077 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Dec 11 10:40:38 crc kubenswrapper[5016]: I1211 10:40:38.918023 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Dec 11 10:40:39 crc kubenswrapper[5016]: I1211 10:40:39.119213 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Dec 11 10:40:39 crc kubenswrapper[5016]: I1211 10:40:39.362961 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Dec 11 10:40:39 crc kubenswrapper[5016]: I1211 10:40:39.490982 5016 scope.go:117] "RemoveContainer" containerID="f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65"
Dec 11 10:40:39 crc kubenswrapper[5016]: E1211 10:40:39.492458 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390"
Dec 11 10:40:39 crc kubenswrapper[5016]: I1211 10:40:39.630223 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Dec 11 10:40:39 crc kubenswrapper[5016]: I1211 10:40:39.766906 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Dec 11 10:40:39 crc kubenswrapper[5016]: I1211 10:40:39.832753 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Dec 11 10:40:39 crc kubenswrapper[5016]: I1211 10:40:39.949482 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Dec 11 10:40:39 crc kubenswrapper[5016]: I1211 10:40:39.951531 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Dec 11 10:40:39 crc kubenswrapper[5016]: I1211 10:40:39.998477 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Dec 11 10:40:40 crc kubenswrapper[5016]: I1211 10:40:40.086194 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Dec 11 10:40:41 crc kubenswrapper[5016]: I1211 10:40:41.131995 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Dec 11 10:40:41 crc kubenswrapper[5016]: I1211 10:40:41.275291 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Dec 11 10:40:41 crc kubenswrapper[5016]: I1211 10:40:41.305773 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Dec 11 10:40:41 crc kubenswrapper[5016]: I1211 10:40:41.343053 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Dec 11 10:40:41 crc kubenswrapper[5016]: I1211 10:40:41.472427 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Dec 11 10:40:41 crc kubenswrapper[5016]: I1211 10:40:41.568911 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Dec 11 10:40:41 crc kubenswrapper[5016]: I1211 10:40:41.694755 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Dec 11 10:40:42 crc kubenswrapper[5016]: I1211 10:40:42.675475 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Dec 11 10:40:42 crc kubenswrapper[5016]: I1211 10:40:42.686597 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Dec 11 10:40:42 crc kubenswrapper[5016]: I1211 10:40:42.761157 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Dec 11 10:40:42 crc kubenswrapper[5016]: I1211 10:40:42.788636 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Dec 11 10:40:42 crc kubenswrapper[5016]: I1211 10:40:42.927886 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Dec 11 10:40:42 crc kubenswrapper[5016]: I1211 10:40:42.927970 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056319 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056399 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056414 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056478 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056516 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056556 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056561 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056607 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056700 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056828 5016 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\""
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056846 5016 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\""
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056858 5016 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\""
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.056870 5016 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\""
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.064044 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.158542 5016 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\""
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.214627 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.214694 5016 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="fb8820d3d9ee638e20951da811b9d705c1a02a15688cacce52745491124d8557" exitCode=137
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.214746 5016 scope.go:117] "RemoveContainer" containerID="fb8820d3d9ee638e20951da811b9d705c1a02a15688cacce52745491124d8557"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.214895 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.232813 5016 scope.go:117] "RemoveContainer" containerID="fb8820d3d9ee638e20951da811b9d705c1a02a15688cacce52745491124d8557"
Dec 11 10:40:43 crc kubenswrapper[5016]: E1211 10:40:43.233232 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb8820d3d9ee638e20951da811b9d705c1a02a15688cacce52745491124d8557\": container with ID starting with fb8820d3d9ee638e20951da811b9d705c1a02a15688cacce52745491124d8557 not found: ID does not exist" containerID="fb8820d3d9ee638e20951da811b9d705c1a02a15688cacce52745491124d8557"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.233281 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb8820d3d9ee638e20951da811b9d705c1a02a15688cacce52745491124d8557"} err="failed to get container status \"fb8820d3d9ee638e20951da811b9d705c1a02a15688cacce52745491124d8557\": rpc error: code = NotFound desc = could not find container \"fb8820d3d9ee638e20951da811b9d705c1a02a15688cacce52745491124d8557\": container with ID starting with fb8820d3d9ee638e20951da811b9d705c1a02a15688cacce52745491124d8557 not found: ID does not exist"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.290858 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.313872 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.379696 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.469714 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.481818 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.482080 5016 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID=""
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.493621 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.493657 5016 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="2bc88c8a-fd5a-4f1f-8603-dc9928c3e952"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.497071 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.497097 5016 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="2bc88c8a-fd5a-4f1f-8603-dc9928c3e952"
Dec 11 10:40:43 crc kubenswrapper[5016]: I1211 10:40:43.639601 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Dec 11 10:40:44 crc kubenswrapper[5016]: I1211 10:40:44.117686 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Dec 11 10:40:44 crc kubenswrapper[5016]: I1211 10:40:44.181820 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Dec 11 10:40:44 crc kubenswrapper[5016]: I1211 10:40:44.312309 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Dec 11 10:40:44 crc kubenswrapper[5016]: I1211 10:40:44.345719 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Dec 11 10:40:44 crc kubenswrapper[5016]: I1211 10:40:44.755213 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Dec 11 10:40:44 crc kubenswrapper[5016]: I1211 10:40:44.858649 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Dec 11 10:40:44 crc kubenswrapper[5016]: I1211 10:40:44.902687 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Dec 11 10:40:46 crc kubenswrapper[5016]: I1211 10:40:46.016420 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Dec 11 10:40:46 crc kubenswrapper[5016]: I1211 10:40:46.064680 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Dec 11 10:40:46 crc kubenswrapper[5016]: I1211 10:40:46.336422 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Dec 11 10:40:47 crc kubenswrapper[5016]: I1211 10:40:47.203651 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Dec 11 10:40:47 crc kubenswrapper[5016]: I1211 10:40:47.267343 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Dec 11 10:40:47 crc kubenswrapper[5016]: I1211 10:40:47.542908 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Dec 11 10:40:47 crc kubenswrapper[5016]: I1211 10:40:47.686085 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Dec 11 10:40:47 crc kubenswrapper[5016]: I1211 10:40:47.783630 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Dec 11 10:40:48 crc kubenswrapper[5016]: I1211 10:40:48.371184 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Dec 11 10:40:48 crc kubenswrapper[5016]: I1211 10:40:48.422415 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Dec 11 10:40:49 crc kubenswrapper[5016]: I1211 10:40:49.042545 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Dec 11 10:40:49 crc kubenswrapper[5016]: I1211 10:40:49.308193 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Dec 11 10:40:49 crc kubenswrapper[5016]: I1211 10:40:49.321528 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Dec 11 10:40:49 crc kubenswrapper[5016]: I1211 10:40:49.380538 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Dec 11 10:40:49 crc kubenswrapper[5016]: I1211 10:40:49.784682 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Dec 11 10:40:49 crc kubenswrapper[5016]: I1211 10:40:49.835486 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Dec 11 10:40:50 crc kubenswrapper[5016]: I1211 10:40:50.435647 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Dec 11 10:40:50 crc kubenswrapper[5016]: I1211 10:40:50.928121 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Dec 11 10:40:50 crc kubenswrapper[5016]: I1211 10:40:50.980306 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Dec 11 10:40:51 crc kubenswrapper[5016]: I1211 10:40:51.041079 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Dec 11 10:40:51 crc kubenswrapper[5016]: I1211 10:40:51.124533 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Dec 11 10:40:51 crc kubenswrapper[5016]: I1211 10:40:51.210581 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Dec 11 10:40:51 crc kubenswrapper[5016]: I1211 10:40:51.337611 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Dec 11 10:40:51 crc kubenswrapper[5016]: I1211 10:40:51.454467 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Dec 11 10:40:51 crc kubenswrapper[5016]: I1211 10:40:51.476664 5016 scope.go:117] "RemoveContainer" containerID="f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65"
Dec 11 10:40:51 crc kubenswrapper[5016]: E1211 10:40:51.476879 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-kp5bk_openshift-marketplace(d8539d49-e453-4b15-a4d6-0e0583b93390)\"" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390"
Dec 11 10:40:52 crc kubenswrapper[5016]: I1211 10:40:52.288433 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Dec 11 10:40:52 crc kubenswrapper[5016]: I1211 10:40:52.355969 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Dec 11 10:40:52 crc kubenswrapper[5016]: I1211 10:40:52.524053 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Dec 11 10:40:53 crc kubenswrapper[5016]: I1211 10:40:53.167348 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Dec 11 10:40:53 crc kubenswrapper[5016]: I1211 10:40:53.452678 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Dec 11 10:40:54 crc kubenswrapper[5016]: I1211 10:40:54.039331 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Dec 11 10:40:54 crc kubenswrapper[5016]: I1211 10:40:54.272500 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-ljcrh_af75da0d-e4cb-4961-b57a-ea888c20af89/control-plane-machine-set-operator/1.log"
Dec 11 10:40:54 crc kubenswrapper[5016]: I1211 10:40:54.273124 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-ljcrh_af75da0d-e4cb-4961-b57a-ea888c20af89/control-plane-machine-set-operator/0.log"
Dec 11 10:40:54 crc kubenswrapper[5016]: I1211 10:40:54.273177 5016 generic.go:334] "Generic (PLEG): container finished" podID="af75da0d-e4cb-4961-b57a-ea888c20af89" containerID="0b4f205a4279cc0525d5c597baf6cce03ca206d85d0ed2e91ac43a490cf5d5e8" exitCode=255
Dec 11 10:40:54 crc kubenswrapper[5016]: I1211 10:40:54.273210 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" event={"ID":"af75da0d-e4cb-4961-b57a-ea888c20af89","Type":"ContainerDied","Data":"0b4f205a4279cc0525d5c597baf6cce03ca206d85d0ed2e91ac43a490cf5d5e8"}
Dec 11 10:40:54 crc kubenswrapper[5016]: I1211 10:40:54.273249 5016 scope.go:117] "RemoveContainer" containerID="9ce4148ece840ebe41ad58c912bfffe8834d0fbc87aad978fef1853fa1e8b6a2"
Dec 11 10:40:54 crc kubenswrapper[5016]: I1211 10:40:54.273691 5016 scope.go:117] "RemoveContainer" containerID="0b4f205a4279cc0525d5c597baf6cce03ca206d85d0ed2e91ac43a490cf5d5e8"
Dec 11 10:40:54 crc kubenswrapper[5016]: E1211 10:40:54.273870 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"control-plane-machine-set-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=control-plane-machine-set-operator pod=control-plane-machine-set-operator-78cbb6b69f-ljcrh_openshift-machine-api(af75da0d-e4cb-4961-b57a-ea888c20af89)\"" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" podUID="af75da0d-e4cb-4961-b57a-ea888c20af89"
Dec 11 10:40:54 crc kubenswrapper[5016]: I1211 10:40:54.833468 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Dec 11 10:40:54 crc kubenswrapper[5016]: I1211 10:40:54.998932 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Dec 11 10:40:55 crc kubenswrapper[5016]: I1211 10:40:55.082395 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Dec 11 10:40:55 crc kubenswrapper[5016]: I1211 10:40:55.237921 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Dec 11 10:40:55 crc kubenswrapper[5016]: I1211 10:40:55.284934 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-ljcrh_af75da0d-e4cb-4961-b57a-ea888c20af89/control-plane-machine-set-operator/1.log"
Dec 11 10:40:55 crc kubenswrapper[5016]: 
I1211 10:40:55.684926 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 11 10:40:55 crc kubenswrapper[5016]: I1211 10:40:55.865388 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 11 10:40:55 crc kubenswrapper[5016]: I1211 10:40:55.917780 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 11 10:40:56 crc kubenswrapper[5016]: I1211 10:40:56.251832 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 11 10:40:56 crc kubenswrapper[5016]: I1211 10:40:56.620892 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 11 10:40:57 crc kubenswrapper[5016]: I1211 10:40:57.227807 5016 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 11 10:40:57 crc kubenswrapper[5016]: I1211 10:40:57.571078 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 11 10:40:57 crc kubenswrapper[5016]: I1211 10:40:57.627303 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 11 10:40:57 crc kubenswrapper[5016]: I1211 10:40:57.633265 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 11 10:40:57 crc kubenswrapper[5016]: I1211 10:40:57.766279 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 11 10:40:58 crc kubenswrapper[5016]: I1211 10:40:58.357998 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 11 10:40:58 crc kubenswrapper[5016]: I1211 10:40:58.865124 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 11 10:40:58 crc kubenswrapper[5016]: I1211 10:40:58.872047 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 11 10:41:00 crc kubenswrapper[5016]: I1211 10:41:00.059435 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 11 10:41:00 crc kubenswrapper[5016]: I1211 10:41:00.498207 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 11 10:41:00 crc kubenswrapper[5016]: I1211 10:41:00.684386 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.015457 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.081279 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.690299 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.731464 5016 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-console"/"kube-root-ca.crt" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.778196 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-5dc57f868f-gzdj8"] Dec 11 10:41:01 crc kubenswrapper[5016]: E1211 10:41:01.778484 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" containerName="installer" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.778506 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" containerName="installer" Dec 11 10:41:01 crc kubenswrapper[5016]: E1211 10:41:01.778515 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" containerName="oauth-openshift" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.778520 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" containerName="oauth-openshift" Dec 11 10:41:01 crc kubenswrapper[5016]: E1211 10:41:01.778530 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.778537 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.778640 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.778655 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="eda32bb5-f0ba-418d-936d-8f3909f5d759" containerName="installer" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.778664 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="a91554fe-759f-4f9a-9d88-7b4d8650a08b" containerName="oauth-openshift" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.779076 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.785216 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.785251 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.785457 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.785567 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.787036 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.787089 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.787264 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.787325 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.787346 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.787429 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.787658 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.787883 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.791545 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.794590 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5dc57f868f-gzdj8"] Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.795972 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.802490 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929115 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-user-template-login\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " 
pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929159 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-service-ca\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929185 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929319 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-session\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929365 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-user-template-error\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929389 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929413 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvsvw\" (UniqueName: \"kubernetes.io/projected/6ca71bcd-7672-4541-9195-fb53b9ae9eea-kube-api-access-zvsvw\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929443 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ca71bcd-7672-4541-9195-fb53b9ae9eea-audit-policies\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929468 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929484 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929571 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929682 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ca71bcd-7672-4541-9195-fb53b9ae9eea-audit-dir\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929713 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-router-certs\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:01 crc kubenswrapper[5016]: I1211 10:41:01.929739 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031266 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ca71bcd-7672-4541-9195-fb53b9ae9eea-audit-dir\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031319 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-router-certs\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031344 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031379 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-user-template-login\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031414 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-service-ca\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031442 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031486 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-session\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031510 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-user-template-error\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031528 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031547 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvsvw\" (UniqueName: \"kubernetes.io/projected/6ca71bcd-7672-4541-9195-fb53b9ae9eea-kube-api-access-zvsvw\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031567 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ca71bcd-7672-4541-9195-fb53b9ae9eea-audit-policies\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031587 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031608 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031640 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.031418 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ca71bcd-7672-4541-9195-fb53b9ae9eea-audit-dir\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.032688 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.033198 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-service-ca\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.033330 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ca71bcd-7672-4541-9195-fb53b9ae9eea-audit-policies\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.033769 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.037900 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-router-certs\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.038083 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.038503 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.038745 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-session\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.038989 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.039823 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-user-template-login\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.044245 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-user-template-error\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.044364 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/6ca71bcd-7672-4541-9195-fb53b9ae9eea-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.051492 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvsvw\" (UniqueName: \"kubernetes.io/projected/6ca71bcd-7672-4541-9195-fb53b9ae9eea-kube-api-access-zvsvw\") pod \"oauth-openshift-5dc57f868f-gzdj8\" (UID: \"6ca71bcd-7672-4541-9195-fb53b9ae9eea\") " pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.098038 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.271125 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.393437 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5dc57f868f-gzdj8"] Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.447047 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 11 10:41:02 crc kubenswrapper[5016]: I1211 10:41:02.905541 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 11 10:41:03 crc kubenswrapper[5016]: I1211 10:41:03.346545 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" event={"ID":"6ca71bcd-7672-4541-9195-fb53b9ae9eea","Type":"ContainerStarted","Data":"b3064ce39ed772995137b8984360b1109e2ca1e2674b409c989fd68242583802"} Dec 11 10:41:03 crc kubenswrapper[5016]: I1211 10:41:03.346604 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" event={"ID":"6ca71bcd-7672-4541-9195-fb53b9ae9eea","Type":"ContainerStarted","Data":"c3bcc2018e9c1c344a1d1c740f1127750a160d0d660a68cae5d64f2429c3f465"} Dec 11 10:41:03 crc kubenswrapper[5016]: I1211 10:41:03.347868 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:03 crc kubenswrapper[5016]: I1211 10:41:03.352967 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" Dec 11 10:41:03 crc kubenswrapper[5016]: I1211 10:41:03.373251 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-5dc57f868f-gzdj8" podStartSLOduration=232.373235801 podStartE2EDuration="3m52.373235801s" podCreationTimestamp="2025-12-11 10:37:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:41:03.3715632 +0000 UTC m=+380.190122799" watchObservedRunningTime="2025-12-11 10:41:03.373235801 +0000 UTC m=+380.191795380" Dec 11 10:41:03 crc kubenswrapper[5016]: I1211 10:41:03.477660 5016 scope.go:117] "RemoveContainer" containerID="f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65" Dec 11 10:41:03 crc kubenswrapper[5016]: I1211 10:41:03.644408 5016 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 11 10:41:03 crc kubenswrapper[5016]: I1211 10:41:03.794006 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 11 10:41:04 crc kubenswrapper[5016]: I1211 10:41:04.354659 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/3.log" Dec 11 10:41:04 crc kubenswrapper[5016]: I1211 10:41:04.355175 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" event={"ID":"d8539d49-e453-4b15-a4d6-0e0583b93390","Type":"ContainerStarted","Data":"0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38"} Dec 11 10:41:04 crc kubenswrapper[5016]: I1211 10:41:04.355612 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:41:04 crc kubenswrapper[5016]: I1211 10:41:04.357910 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:41:04 crc kubenswrapper[5016]: I1211 10:41:04.620701 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 11 10:41:05 crc kubenswrapper[5016]: I1211 10:41:05.381637 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 11 10:41:07 crc kubenswrapper[5016]: I1211 10:41:07.030049 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 11 10:41:07 crc kubenswrapper[5016]: I1211 10:41:07.093510 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 11 10:41:07 crc kubenswrapper[5016]: I1211 10:41:07.407067 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 11 10:41:08 crc kubenswrapper[5016]: I1211 10:41:08.586613 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 11 10:41:09 crc kubenswrapper[5016]: I1211 10:41:09.474051 5016 scope.go:117] "RemoveContainer" containerID="0b4f205a4279cc0525d5c597baf6cce03ca206d85d0ed2e91ac43a490cf5d5e8" Dec 11 10:41:10 crc kubenswrapper[5016]: I1211 10:41:10.390883 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-ljcrh_af75da0d-e4cb-4961-b57a-ea888c20af89/control-plane-machine-set-operator/1.log" Dec 11 10:41:10 crc kubenswrapper[5016]: I1211 10:41:10.391539 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ljcrh" event={"ID":"af75da0d-e4cb-4961-b57a-ea888c20af89","Type":"ContainerStarted","Data":"d3be6287ff924fac010a2b3203505139e4b54eda164adff79652d2923429528c"} Dec 11 10:41:12 crc kubenswrapper[5016]: I1211 10:41:12.933242 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 
11 10:41:12 crc kubenswrapper[5016]: I1211 10:41:12.933757 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:41:18 crc kubenswrapper[5016]: I1211 10:41:18.899470 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 11 10:41:20 crc kubenswrapper[5016]: I1211 10:41:20.661975 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xdpcj"] Dec 11 10:41:20 crc kubenswrapper[5016]: I1211 10:41:20.662182 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" podUID="b1573c39-dbf1-475d-90d8-2bc8d89f18c6" containerName="controller-manager" containerID="cri-o://09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3" gracePeriod=30 Dec 11 10:41:20 crc kubenswrapper[5016]: I1211 10:41:20.755920 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9"] Dec 11 10:41:20 crc kubenswrapper[5016]: I1211 10:41:20.756345 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" podUID="771549fe-a108-4fe9-a461-043432468961" containerName="route-controller-manager" containerID="cri-o://59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e" gracePeriod=30 Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.035443 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.085171 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-config\") pod \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.085244 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzmvt\" (UniqueName: \"kubernetes.io/projected/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-kube-api-access-qzmvt\") pod \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.085288 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-client-ca\") pod \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.085340 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-proxy-ca-bundles\") pod \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.085378 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-serving-cert\") pod \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\" (UID: \"b1573c39-dbf1-475d-90d8-2bc8d89f18c6\") " Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.086620 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-client-ca" (OuterVolumeSpecName: "client-ca") pod "b1573c39-dbf1-475d-90d8-2bc8d89f18c6" (UID: "b1573c39-dbf1-475d-90d8-2bc8d89f18c6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.087761 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "b1573c39-dbf1-475d-90d8-2bc8d89f18c6" (UID: "b1573c39-dbf1-475d-90d8-2bc8d89f18c6"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.088606 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-config" (OuterVolumeSpecName: "config") pod "b1573c39-dbf1-475d-90d8-2bc8d89f18c6" (UID: "b1573c39-dbf1-475d-90d8-2bc8d89f18c6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.092362 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-kube-api-access-qzmvt" (OuterVolumeSpecName: "kube-api-access-qzmvt") pod "b1573c39-dbf1-475d-90d8-2bc8d89f18c6" (UID: "b1573c39-dbf1-475d-90d8-2bc8d89f18c6"). InnerVolumeSpecName "kube-api-access-qzmvt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.093419 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b1573c39-dbf1-475d-90d8-2bc8d89f18c6" (UID: "b1573c39-dbf1-475d-90d8-2bc8d89f18c6"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.101190 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.186864 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/771549fe-a108-4fe9-a461-043432468961-serving-cert\") pod \"771549fe-a108-4fe9-a461-043432468961\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.186983 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/771549fe-a108-4fe9-a461-043432468961-client-ca\") pod \"771549fe-a108-4fe9-a461-043432468961\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.187011 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqr5k\" (UniqueName: \"kubernetes.io/projected/771549fe-a108-4fe9-a461-043432468961-kube-api-access-dqr5k\") pod \"771549fe-a108-4fe9-a461-043432468961\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.187087 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/771549fe-a108-4fe9-a461-043432468961-config\") pod \"771549fe-a108-4fe9-a461-043432468961\" (UID: \"771549fe-a108-4fe9-a461-043432468961\") " Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.187401 5016 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.187425 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.187438 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.187451 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzmvt\" (UniqueName: \"kubernetes.io/projected/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-kube-api-access-qzmvt\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.187464 5016 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b1573c39-dbf1-475d-90d8-2bc8d89f18c6-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.187973 5016 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/configmap/771549fe-a108-4fe9-a461-043432468961-client-ca" (OuterVolumeSpecName: "client-ca") pod "771549fe-a108-4fe9-a461-043432468961" (UID: "771549fe-a108-4fe9-a461-043432468961"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.188114 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/771549fe-a108-4fe9-a461-043432468961-config" (OuterVolumeSpecName: "config") pod "771549fe-a108-4fe9-a461-043432468961" (UID: "771549fe-a108-4fe9-a461-043432468961"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.190918 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/771549fe-a108-4fe9-a461-043432468961-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "771549fe-a108-4fe9-a461-043432468961" (UID: "771549fe-a108-4fe9-a461-043432468961"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.191317 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/771549fe-a108-4fe9-a461-043432468961-kube-api-access-dqr5k" (OuterVolumeSpecName: "kube-api-access-dqr5k") pod "771549fe-a108-4fe9-a461-043432468961" (UID: "771549fe-a108-4fe9-a461-043432468961"). InnerVolumeSpecName "kube-api-access-dqr5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.288171 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/771549fe-a108-4fe9-a461-043432468961-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.288210 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/771549fe-a108-4fe9-a461-043432468961-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.288223 5016 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/771549fe-a108-4fe9-a461-043432468961-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.288232 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqr5k\" (UniqueName: \"kubernetes.io/projected/771549fe-a108-4fe9-a461-043432468961-kube-api-access-dqr5k\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.456108 5016 generic.go:334] "Generic (PLEG): container finished" podID="771549fe-a108-4fe9-a461-043432468961" containerID="59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e" exitCode=0 Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.456151 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.456214 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" event={"ID":"771549fe-a108-4fe9-a461-043432468961","Type":"ContainerDied","Data":"59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e"} Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.456281 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9" event={"ID":"771549fe-a108-4fe9-a461-043432468961","Type":"ContainerDied","Data":"5934380da0368a7bba715eb26ded938297c71eb57ae67081d59cfa8e8ab6b382"} Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.456303 5016 scope.go:117] "RemoveContainer" containerID="59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.459274 5016 generic.go:334] "Generic (PLEG): container finished" podID="b1573c39-dbf1-475d-90d8-2bc8d89f18c6" containerID="09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3" exitCode=0 Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.459308 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" event={"ID":"b1573c39-dbf1-475d-90d8-2bc8d89f18c6","Type":"ContainerDied","Data":"09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3"} Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.459329 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" event={"ID":"b1573c39-dbf1-475d-90d8-2bc8d89f18c6","Type":"ContainerDied","Data":"9fcd8b74d91cdc252e1349ac6381ae4e544549c7eb1e89e495ad5038b021299d"} Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.459423 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xdpcj" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.475722 5016 scope.go:117] "RemoveContainer" containerID="59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e" Dec 11 10:41:21 crc kubenswrapper[5016]: E1211 10:41:21.476663 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e\": container with ID starting with 59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e not found: ID does not exist" containerID="59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.476704 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e"} err="failed to get container status \"59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e\": rpc error: code = NotFound desc = could not find container \"59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e\": container with ID starting with 59eb6d201a900dfb0d2c3971237104a2fbe62283941ef525032d9bd4ed228c9e not found: ID does not exist" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.476728 5016 scope.go:117] "RemoveContainer" containerID="09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.492089 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9"] Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.498838 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-w2qk9"] Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.507681 5016 scope.go:117] "RemoveContainer" containerID="e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.514397 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xdpcj"] Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.522730 5016 scope.go:117] "RemoveContainer" containerID="09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3" Dec 11 10:41:21 crc kubenswrapper[5016]: E1211 10:41:21.523485 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3\": container with ID starting with 09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3 not found: ID does not exist" containerID="09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.523567 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3"} err="failed to get container status \"09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3\": rpc error: code = NotFound desc = could not find container \"09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3\": container with ID starting with 09e80016cd01f7c41e9c6e12c78da5a27e3868d198241ecde1928d7b1ea9f5c3 not found: ID does not exist" Dec 11 10:41:21 crc 
kubenswrapper[5016]: I1211 10:41:21.523621 5016 scope.go:117] "RemoveContainer" containerID="e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b" Dec 11 10:41:21 crc kubenswrapper[5016]: E1211 10:41:21.524063 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b\": container with ID starting with e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b not found: ID does not exist" containerID="e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.524113 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b"} err="failed to get container status \"e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b\": rpc error: code = NotFound desc = could not find container \"e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b\": container with ID starting with e06283c97590150157a9e42c84d6f310cc92cbda0fb5ac1401a04279b148972b not found: ID does not exist" Dec 11 10:41:21 crc kubenswrapper[5016]: I1211 10:41:21.529647 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xdpcj"] Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.080616 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-d7964759b-pgvq6"] Dec 11 10:41:22 crc kubenswrapper[5016]: E1211 10:41:22.080920 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1573c39-dbf1-475d-90d8-2bc8d89f18c6" containerName="controller-manager" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.080933 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1573c39-dbf1-475d-90d8-2bc8d89f18c6" containerName="controller-manager" Dec 11 10:41:22 crc kubenswrapper[5016]: E1211 10:41:22.080971 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="771549fe-a108-4fe9-a461-043432468961" containerName="route-controller-manager" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.080978 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="771549fe-a108-4fe9-a461-043432468961" containerName="route-controller-manager" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.081079 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="771549fe-a108-4fe9-a461-043432468961" containerName="route-controller-manager" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.081093 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1573c39-dbf1-475d-90d8-2bc8d89f18c6" containerName="controller-manager" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.081104 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1573c39-dbf1-475d-90d8-2bc8d89f18c6" containerName="controller-manager" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.081532 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.083649 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.083920 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.083922 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.084721 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.084757 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.084840 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.086102 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz"] Dec 11 10:41:22 crc kubenswrapper[5016]: E1211 10:41:22.086387 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1573c39-dbf1-475d-90d8-2bc8d89f18c6" containerName="controller-manager" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.086408 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1573c39-dbf1-475d-90d8-2bc8d89f18c6" containerName="controller-manager" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.087053 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.088863 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.088998 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.089067 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.089397 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.089992 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.091286 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-d7964759b-pgvq6"] Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.092402 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.092849 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.095480 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz"] Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.196897 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-config\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.197060 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-proxy-ca-bundles\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.197138 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/410ce4c8-1966-4aee-8af2-c50e64495713-serving-cert\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.197168 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5qw2\" (UniqueName: \"kubernetes.io/projected/410ce4c8-1966-4aee-8af2-c50e64495713-kube-api-access-t5qw2\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " 
pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.197257 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a612e1-3182-41e3-bba9-83269cf38fcc-config\") pod \"route-controller-manager-847b99569b-rwcmz\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.197334 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdr9z\" (UniqueName: \"kubernetes.io/projected/e4a612e1-3182-41e3-bba9-83269cf38fcc-kube-api-access-vdr9z\") pod \"route-controller-manager-847b99569b-rwcmz\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.197404 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4a612e1-3182-41e3-bba9-83269cf38fcc-serving-cert\") pod \"route-controller-manager-847b99569b-rwcmz\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.197521 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-client-ca\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.197583 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e4a612e1-3182-41e3-bba9-83269cf38fcc-client-ca\") pod \"route-controller-manager-847b99569b-rwcmz\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.299073 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a612e1-3182-41e3-bba9-83269cf38fcc-config\") pod \"route-controller-manager-847b99569b-rwcmz\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.299136 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdr9z\" (UniqueName: \"kubernetes.io/projected/e4a612e1-3182-41e3-bba9-83269cf38fcc-kube-api-access-vdr9z\") pod \"route-controller-manager-847b99569b-rwcmz\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.299171 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4a612e1-3182-41e3-bba9-83269cf38fcc-serving-cert\") pod \"route-controller-manager-847b99569b-rwcmz\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " 
pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.299196 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-client-ca\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.299223 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e4a612e1-3182-41e3-bba9-83269cf38fcc-client-ca\") pod \"route-controller-manager-847b99569b-rwcmz\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.299268 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-config\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.299293 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-proxy-ca-bundles\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.299324 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5qw2\" (UniqueName: \"kubernetes.io/projected/410ce4c8-1966-4aee-8af2-c50e64495713-kube-api-access-t5qw2\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.299343 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/410ce4c8-1966-4aee-8af2-c50e64495713-serving-cert\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.301197 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-client-ca\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.301248 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e4a612e1-3182-41e3-bba9-83269cf38fcc-client-ca\") pod \"route-controller-manager-847b99569b-rwcmz\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.301402 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a612e1-3182-41e3-bba9-83269cf38fcc-config\") pod \"route-controller-manager-847b99569b-rwcmz\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.301828 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-proxy-ca-bundles\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.303351 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-config\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.305850 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4a612e1-3182-41e3-bba9-83269cf38fcc-serving-cert\") pod \"route-controller-manager-847b99569b-rwcmz\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.306636 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/410ce4c8-1966-4aee-8af2-c50e64495713-serving-cert\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.318883 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5qw2\" (UniqueName: \"kubernetes.io/projected/410ce4c8-1966-4aee-8af2-c50e64495713-kube-api-access-t5qw2\") pod \"controller-manager-d7964759b-pgvq6\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.319815 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdr9z\" (UniqueName: \"kubernetes.io/projected/e4a612e1-3182-41e3-bba9-83269cf38fcc-kube-api-access-vdr9z\") pod \"route-controller-manager-847b99569b-rwcmz\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.399240 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.406655 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.412724 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.660345 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-d7964759b-pgvq6"] Dec 11 10:41:22 crc kubenswrapper[5016]: I1211 10:41:22.668500 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz"] Dec 11 10:41:22 crc kubenswrapper[5016]: W1211 10:41:22.682395 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode4a612e1_3182_41e3_bba9_83269cf38fcc.slice/crio-34cb9f09d0cf4e549177bd8eb5b4b4af8f1aa0afce59c4d7c738db9b25bea97a WatchSource:0}: Error finding container 34cb9f09d0cf4e549177bd8eb5b4b4af8f1aa0afce59c4d7c738db9b25bea97a: Status 404 returned error can't find the container with id 34cb9f09d0cf4e549177bd8eb5b4b4af8f1aa0afce59c4d7c738db9b25bea97a Dec 11 10:41:23 crc kubenswrapper[5016]: I1211 10:41:23.484413 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="771549fe-a108-4fe9-a461-043432468961" path="/var/lib/kubelet/pods/771549fe-a108-4fe9-a461-043432468961/volumes" Dec 11 10:41:23 crc kubenswrapper[5016]: I1211 10:41:23.488684 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1573c39-dbf1-475d-90d8-2bc8d89f18c6" path="/var/lib/kubelet/pods/b1573c39-dbf1-475d-90d8-2bc8d89f18c6/volumes" Dec 11 10:41:23 crc kubenswrapper[5016]: I1211 10:41:23.490365 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" event={"ID":"e4a612e1-3182-41e3-bba9-83269cf38fcc","Type":"ContainerStarted","Data":"f02327108ad5e22c03a63a38d72675d74b6869ff57549863641ff1b9b6676977"} Dec 11 10:41:23 crc kubenswrapper[5016]: I1211 10:41:23.490395 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" event={"ID":"e4a612e1-3182-41e3-bba9-83269cf38fcc","Type":"ContainerStarted","Data":"34cb9f09d0cf4e549177bd8eb5b4b4af8f1aa0afce59c4d7c738db9b25bea97a"} Dec 11 10:41:23 crc kubenswrapper[5016]: I1211 10:41:23.490408 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" event={"ID":"410ce4c8-1966-4aee-8af2-c50e64495713","Type":"ContainerStarted","Data":"bdfdc0fe7dbbc74a96cda0bcd9aaf148ca7007ce20912555ad24a7d441cb34ae"} Dec 11 10:41:23 crc kubenswrapper[5016]: I1211 10:41:23.490520 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" event={"ID":"410ce4c8-1966-4aee-8af2-c50e64495713","Type":"ContainerStarted","Data":"5fe313ad316432d70970addcbac2c45f95e2cef3ac2ed87d555ef7145462087d"} Dec 11 10:41:23 crc kubenswrapper[5016]: I1211 10:41:23.490542 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:23 crc kubenswrapper[5016]: I1211 10:41:23.490555 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:23 crc kubenswrapper[5016]: I1211 10:41:23.490693 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 
10:41:23 crc kubenswrapper[5016]: I1211 10:41:23.540174 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" podStartSLOduration=3.540153143 podStartE2EDuration="3.540153143s" podCreationTimestamp="2025-12-11 10:41:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:41:23.537775165 +0000 UTC m=+400.356334754" watchObservedRunningTime="2025-12-11 10:41:23.540153143 +0000 UTC m=+400.358712742" Dec 11 10:41:23 crc kubenswrapper[5016]: I1211 10:41:23.543618 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:23 crc kubenswrapper[5016]: I1211 10:41:23.574440 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" podStartSLOduration=3.57441557 podStartE2EDuration="3.57441557s" podCreationTimestamp="2025-12-11 10:41:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:41:23.559293321 +0000 UTC m=+400.377852910" watchObservedRunningTime="2025-12-11 10:41:23.57441557 +0000 UTC m=+400.392975159" Dec 11 10:41:28 crc kubenswrapper[5016]: I1211 10:41:28.893674 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-d7964759b-pgvq6"] Dec 11 10:41:28 crc kubenswrapper[5016]: I1211 10:41:28.894227 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" podUID="410ce4c8-1966-4aee-8af2-c50e64495713" containerName="controller-manager" containerID="cri-o://bdfdc0fe7dbbc74a96cda0bcd9aaf148ca7007ce20912555ad24a7d441cb34ae" gracePeriod=30 Dec 11 10:41:28 crc kubenswrapper[5016]: I1211 10:41:28.907256 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz"] Dec 11 10:41:28 crc kubenswrapper[5016]: I1211 10:41:28.907700 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" podUID="e4a612e1-3182-41e3-bba9-83269cf38fcc" containerName="route-controller-manager" containerID="cri-o://f02327108ad5e22c03a63a38d72675d74b6869ff57549863641ff1b9b6676977" gracePeriod=30 Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.524837 5016 generic.go:334] "Generic (PLEG): container finished" podID="e4a612e1-3182-41e3-bba9-83269cf38fcc" containerID="f02327108ad5e22c03a63a38d72675d74b6869ff57549863641ff1b9b6676977" exitCode=0 Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.524945 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" event={"ID":"e4a612e1-3182-41e3-bba9-83269cf38fcc","Type":"ContainerDied","Data":"f02327108ad5e22c03a63a38d72675d74b6869ff57549863641ff1b9b6676977"} Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.528414 5016 generic.go:334] "Generic (PLEG): container finished" podID="410ce4c8-1966-4aee-8af2-c50e64495713" containerID="bdfdc0fe7dbbc74a96cda0bcd9aaf148ca7007ce20912555ad24a7d441cb34ae" exitCode=0 Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.528449 5016 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" event={"ID":"410ce4c8-1966-4aee-8af2-c50e64495713","Type":"ContainerDied","Data":"bdfdc0fe7dbbc74a96cda0bcd9aaf148ca7007ce20912555ad24a7d441cb34ae"} Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.790101 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.801870 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.898816 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e4a612e1-3182-41e3-bba9-83269cf38fcc-client-ca\") pod \"e4a612e1-3182-41e3-bba9-83269cf38fcc\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.899196 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a612e1-3182-41e3-bba9-83269cf38fcc-config\") pod \"e4a612e1-3182-41e3-bba9-83269cf38fcc\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.899286 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4a612e1-3182-41e3-bba9-83269cf38fcc-serving-cert\") pod \"e4a612e1-3182-41e3-bba9-83269cf38fcc\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.899321 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/410ce4c8-1966-4aee-8af2-c50e64495713-serving-cert\") pod \"410ce4c8-1966-4aee-8af2-c50e64495713\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.899350 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t5qw2\" (UniqueName: \"kubernetes.io/projected/410ce4c8-1966-4aee-8af2-c50e64495713-kube-api-access-t5qw2\") pod \"410ce4c8-1966-4aee-8af2-c50e64495713\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.899413 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdr9z\" (UniqueName: \"kubernetes.io/projected/e4a612e1-3182-41e3-bba9-83269cf38fcc-kube-api-access-vdr9z\") pod \"e4a612e1-3182-41e3-bba9-83269cf38fcc\" (UID: \"e4a612e1-3182-41e3-bba9-83269cf38fcc\") " Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.899451 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-client-ca\") pod \"410ce4c8-1966-4aee-8af2-c50e64495713\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.899482 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-config\") pod \"410ce4c8-1966-4aee-8af2-c50e64495713\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.899501 
5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-proxy-ca-bundles\") pod \"410ce4c8-1966-4aee-8af2-c50e64495713\" (UID: \"410ce4c8-1966-4aee-8af2-c50e64495713\") " Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.899692 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4a612e1-3182-41e3-bba9-83269cf38fcc-client-ca" (OuterVolumeSpecName: "client-ca") pod "e4a612e1-3182-41e3-bba9-83269cf38fcc" (UID: "e4a612e1-3182-41e3-bba9-83269cf38fcc"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.899733 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4a612e1-3182-41e3-bba9-83269cf38fcc-config" (OuterVolumeSpecName: "config") pod "e4a612e1-3182-41e3-bba9-83269cf38fcc" (UID: "e4a612e1-3182-41e3-bba9-83269cf38fcc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.900522 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-config" (OuterVolumeSpecName: "config") pod "410ce4c8-1966-4aee-8af2-c50e64495713" (UID: "410ce4c8-1966-4aee-8af2-c50e64495713"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.901015 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-client-ca" (OuterVolumeSpecName: "client-ca") pod "410ce4c8-1966-4aee-8af2-c50e64495713" (UID: "410ce4c8-1966-4aee-8af2-c50e64495713"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.901747 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "410ce4c8-1966-4aee-8af2-c50e64495713" (UID: "410ce4c8-1966-4aee-8af2-c50e64495713"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.911345 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/410ce4c8-1966-4aee-8af2-c50e64495713-kube-api-access-t5qw2" (OuterVolumeSpecName: "kube-api-access-t5qw2") pod "410ce4c8-1966-4aee-8af2-c50e64495713" (UID: "410ce4c8-1966-4aee-8af2-c50e64495713"). InnerVolumeSpecName "kube-api-access-t5qw2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.911610 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/410ce4c8-1966-4aee-8af2-c50e64495713-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "410ce4c8-1966-4aee-8af2-c50e64495713" (UID: "410ce4c8-1966-4aee-8af2-c50e64495713"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.912466 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4a612e1-3182-41e3-bba9-83269cf38fcc-kube-api-access-vdr9z" (OuterVolumeSpecName: "kube-api-access-vdr9z") pod "e4a612e1-3182-41e3-bba9-83269cf38fcc" (UID: "e4a612e1-3182-41e3-bba9-83269cf38fcc"). InnerVolumeSpecName "kube-api-access-vdr9z". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:41:29 crc kubenswrapper[5016]: I1211 10:41:29.914409 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4a612e1-3182-41e3-bba9-83269cf38fcc-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e4a612e1-3182-41e3-bba9-83269cf38fcc" (UID: "e4a612e1-3182-41e3-bba9-83269cf38fcc"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.000979 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdr9z\" (UniqueName: \"kubernetes.io/projected/e4a612e1-3182-41e3-bba9-83269cf38fcc-kube-api-access-vdr9z\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.001020 5016 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.001031 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.001043 5016 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/410ce4c8-1966-4aee-8af2-c50e64495713-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.001053 5016 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e4a612e1-3182-41e3-bba9-83269cf38fcc-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.001063 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a612e1-3182-41e3-bba9-83269cf38fcc-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.001071 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e4a612e1-3182-41e3-bba9-83269cf38fcc-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.001081 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/410ce4c8-1966-4aee-8af2-c50e64495713-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.001090 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t5qw2\" (UniqueName: \"kubernetes.io/projected/410ce4c8-1966-4aee-8af2-c50e64495713-kube-api-access-t5qw2\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.075561 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-85bb984995-7kcnh"] Dec 11 10:41:30 crc kubenswrapper[5016]: 
E1211 10:41:30.075851 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="410ce4c8-1966-4aee-8af2-c50e64495713" containerName="controller-manager" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.075866 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="410ce4c8-1966-4aee-8af2-c50e64495713" containerName="controller-manager" Dec 11 10:41:30 crc kubenswrapper[5016]: E1211 10:41:30.075896 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4a612e1-3182-41e3-bba9-83269cf38fcc" containerName="route-controller-manager" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.075903 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4a612e1-3182-41e3-bba9-83269cf38fcc" containerName="route-controller-manager" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.076017 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="410ce4c8-1966-4aee-8af2-c50e64495713" containerName="controller-manager" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.076031 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4a612e1-3182-41e3-bba9-83269cf38fcc" containerName="route-controller-manager" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.076478 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.082802 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k"] Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.083614 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.087245 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k"] Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.094633 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-85bb984995-7kcnh"] Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.203624 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phzqv\" (UniqueName: \"kubernetes.io/projected/20b71f11-1d5d-4b2a-b324-ce50191fc700-kube-api-access-phzqv\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.203691 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-client-ca\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.203727 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8997c3f1-5a72-46f1-a610-819f5a89eb13-config\") pod \"route-controller-manager-5f5c5bf969-22x7k\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 
10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.203796 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8997c3f1-5a72-46f1-a610-819f5a89eb13-serving-cert\") pod \"route-controller-manager-5f5c5bf969-22x7k\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.203823 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20b71f11-1d5d-4b2a-b324-ce50191fc700-serving-cert\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.204100 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-config\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.204555 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8997c3f1-5a72-46f1-a610-819f5a89eb13-client-ca\") pod \"route-controller-manager-5f5c5bf969-22x7k\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.204621 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-proxy-ca-bundles\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.204660 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvhmt\" (UniqueName: \"kubernetes.io/projected/8997c3f1-5a72-46f1-a610-819f5a89eb13-kube-api-access-fvhmt\") pod \"route-controller-manager-5f5c5bf969-22x7k\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.305663 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8997c3f1-5a72-46f1-a610-819f5a89eb13-config\") pod \"route-controller-manager-5f5c5bf969-22x7k\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.305754 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8997c3f1-5a72-46f1-a610-819f5a89eb13-serving-cert\") pod \"route-controller-manager-5f5c5bf969-22x7k\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc 
kubenswrapper[5016]: I1211 10:41:30.305792 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20b71f11-1d5d-4b2a-b324-ce50191fc700-serving-cert\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.305843 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-config\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.305874 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8997c3f1-5a72-46f1-a610-819f5a89eb13-client-ca\") pod \"route-controller-manager-5f5c5bf969-22x7k\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.305897 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-proxy-ca-bundles\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.305976 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvhmt\" (UniqueName: \"kubernetes.io/projected/8997c3f1-5a72-46f1-a610-819f5a89eb13-kube-api-access-fvhmt\") pod \"route-controller-manager-5f5c5bf969-22x7k\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.306025 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phzqv\" (UniqueName: \"kubernetes.io/projected/20b71f11-1d5d-4b2a-b324-ce50191fc700-kube-api-access-phzqv\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.306056 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-client-ca\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.306997 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8997c3f1-5a72-46f1-a610-819f5a89eb13-client-ca\") pod \"route-controller-manager-5f5c5bf969-22x7k\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.307019 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-client-ca\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.307287 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8997c3f1-5a72-46f1-a610-819f5a89eb13-config\") pod \"route-controller-manager-5f5c5bf969-22x7k\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.307467 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-config\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.307898 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-proxy-ca-bundles\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.309671 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8997c3f1-5a72-46f1-a610-819f5a89eb13-serving-cert\") pod \"route-controller-manager-5f5c5bf969-22x7k\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.309714 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20b71f11-1d5d-4b2a-b324-ce50191fc700-serving-cert\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.325582 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phzqv\" (UniqueName: \"kubernetes.io/projected/20b71f11-1d5d-4b2a-b324-ce50191fc700-kube-api-access-phzqv\") pod \"controller-manager-85bb984995-7kcnh\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.332913 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvhmt\" (UniqueName: \"kubernetes.io/projected/8997c3f1-5a72-46f1-a610-819f5a89eb13-kube-api-access-fvhmt\") pod \"route-controller-manager-5f5c5bf969-22x7k\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.399969 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.407489 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.540600 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.540599 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d7964759b-pgvq6" event={"ID":"410ce4c8-1966-4aee-8af2-c50e64495713","Type":"ContainerDied","Data":"5fe313ad316432d70970addcbac2c45f95e2cef3ac2ed87d555ef7145462087d"} Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.540727 5016 scope.go:117] "RemoveContainer" containerID="bdfdc0fe7dbbc74a96cda0bcd9aaf148ca7007ce20912555ad24a7d441cb34ae" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.553584 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" event={"ID":"e4a612e1-3182-41e3-bba9-83269cf38fcc","Type":"ContainerDied","Data":"34cb9f09d0cf4e549177bd8eb5b4b4af8f1aa0afce59c4d7c738db9b25bea97a"} Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.553684 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.592437 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-d7964759b-pgvq6"] Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.598465 5016 scope.go:117] "RemoveContainer" containerID="f02327108ad5e22c03a63a38d72675d74b6869ff57549863641ff1b9b6676977" Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.599340 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-d7964759b-pgvq6"] Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.607859 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz"] Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.609830 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-847b99569b-rwcmz"] Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.699363 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-85bb984995-7kcnh"] Dec 11 10:41:30 crc kubenswrapper[5016]: I1211 10:41:30.744924 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k"] Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.074860 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-85bb984995-7kcnh"] Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.093732 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k"] Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.483059 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="410ce4c8-1966-4aee-8af2-c50e64495713" path="/var/lib/kubelet/pods/410ce4c8-1966-4aee-8af2-c50e64495713/volumes" Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.483752 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="e4a612e1-3182-41e3-bba9-83269cf38fcc" path="/var/lib/kubelet/pods/e4a612e1-3182-41e3-bba9-83269cf38fcc/volumes" Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.561159 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" event={"ID":"8997c3f1-5a72-46f1-a610-819f5a89eb13","Type":"ContainerStarted","Data":"6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351"} Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.561223 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" event={"ID":"8997c3f1-5a72-46f1-a610-819f5a89eb13","Type":"ContainerStarted","Data":"dbd0eb26ecd0c500fb52de5d95df832481ef5482bff63fff1e8022ebf479e401"} Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.563753 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.566779 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" event={"ID":"20b71f11-1d5d-4b2a-b324-ce50191fc700","Type":"ContainerStarted","Data":"9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62"} Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.566822 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" event={"ID":"20b71f11-1d5d-4b2a-b324-ce50191fc700","Type":"ContainerStarted","Data":"e53764d2f25092af76ed5c1b6552a516e0ae668748cb126233f2731ea697c2d7"} Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.568501 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.571377 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.572530 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.587237 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" podStartSLOduration=3.587216593 podStartE2EDuration="3.587216593s" podCreationTimestamp="2025-12-11 10:41:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:41:31.586115996 +0000 UTC m=+408.404675575" watchObservedRunningTime="2025-12-11 10:41:31.587216593 +0000 UTC m=+408.405776162" Dec 11 10:41:31 crc kubenswrapper[5016]: I1211 10:41:31.613753 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" podStartSLOduration=3.6137365900000002 podStartE2EDuration="3.61373659s" podCreationTimestamp="2025-12-11 10:41:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:41:31.610537251 +0000 UTC m=+408.429096850" watchObservedRunningTime="2025-12-11 10:41:31.61373659 +0000 UTC 
m=+408.432296169" Dec 11 10:41:32 crc kubenswrapper[5016]: I1211 10:41:32.584387 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" podUID="8997c3f1-5a72-46f1-a610-819f5a89eb13" containerName="route-controller-manager" containerID="cri-o://6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351" gracePeriod=30 Dec 11 10:41:32 crc kubenswrapper[5016]: I1211 10:41:32.584575 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" podUID="20b71f11-1d5d-4b2a-b324-ce50191fc700" containerName="controller-manager" containerID="cri-o://9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62" gracePeriod=30 Dec 11 10:41:32 crc kubenswrapper[5016]: I1211 10:41:32.963592 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" Dec 11 10:41:32 crc kubenswrapper[5016]: I1211 10:41:32.974696 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" Dec 11 10:41:32 crc kubenswrapper[5016]: I1211 10:41:32.995430 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96"] Dec 11 10:41:32 crc kubenswrapper[5016]: E1211 10:41:32.995688 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20b71f11-1d5d-4b2a-b324-ce50191fc700" containerName="controller-manager" Dec 11 10:41:32 crc kubenswrapper[5016]: I1211 10:41:32.995709 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="20b71f11-1d5d-4b2a-b324-ce50191fc700" containerName="controller-manager" Dec 11 10:41:32 crc kubenswrapper[5016]: E1211 10:41:32.995730 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8997c3f1-5a72-46f1-a610-819f5a89eb13" containerName="route-controller-manager" Dec 11 10:41:32 crc kubenswrapper[5016]: I1211 10:41:32.995740 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="8997c3f1-5a72-46f1-a610-819f5a89eb13" containerName="route-controller-manager" Dec 11 10:41:32 crc kubenswrapper[5016]: I1211 10:41:32.996043 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="20b71f11-1d5d-4b2a-b324-ce50191fc700" containerName="controller-manager" Dec 11 10:41:32 crc kubenswrapper[5016]: I1211 10:41:32.996094 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="8997c3f1-5a72-46f1-a610-819f5a89eb13" containerName="route-controller-manager" Dec 11 10:41:32 crc kubenswrapper[5016]: I1211 10:41:32.996684 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.010021 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96"] Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.111340 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20b71f11-1d5d-4b2a-b324-ce50191fc700-serving-cert\") pod \"20b71f11-1d5d-4b2a-b324-ce50191fc700\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.111407 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phzqv\" (UniqueName: \"kubernetes.io/projected/20b71f11-1d5d-4b2a-b324-ce50191fc700-kube-api-access-phzqv\") pod \"20b71f11-1d5d-4b2a-b324-ce50191fc700\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.111453 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-proxy-ca-bundles\") pod \"20b71f11-1d5d-4b2a-b324-ce50191fc700\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.111475 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8997c3f1-5a72-46f1-a610-819f5a89eb13-serving-cert\") pod \"8997c3f1-5a72-46f1-a610-819f5a89eb13\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.111492 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-client-ca\") pod \"20b71f11-1d5d-4b2a-b324-ce50191fc700\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.111508 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvhmt\" (UniqueName: \"kubernetes.io/projected/8997c3f1-5a72-46f1-a610-819f5a89eb13-kube-api-access-fvhmt\") pod \"8997c3f1-5a72-46f1-a610-819f5a89eb13\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.111548 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8997c3f1-5a72-46f1-a610-819f5a89eb13-client-ca\") pod \"8997c3f1-5a72-46f1-a610-819f5a89eb13\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.111691 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8997c3f1-5a72-46f1-a610-819f5a89eb13-config\") pod \"8997c3f1-5a72-46f1-a610-819f5a89eb13\" (UID: \"8997c3f1-5a72-46f1-a610-819f5a89eb13\") " Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.111827 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-config\") pod \"20b71f11-1d5d-4b2a-b324-ce50191fc700\" (UID: \"20b71f11-1d5d-4b2a-b324-ce50191fc700\") " Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.112119 5016 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbd16799-2195-4cd1-9794-662c7ac6acec-config\") pod \"route-controller-manager-69c79dd4cc-98t96\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.112175 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dbd16799-2195-4cd1-9794-662c7ac6acec-client-ca\") pod \"route-controller-manager-69c79dd4cc-98t96\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.112203 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfhwr\" (UniqueName: \"kubernetes.io/projected/dbd16799-2195-4cd1-9794-662c7ac6acec-kube-api-access-bfhwr\") pod \"route-controller-manager-69c79dd4cc-98t96\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.112265 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbd16799-2195-4cd1-9794-662c7ac6acec-serving-cert\") pod \"route-controller-manager-69c79dd4cc-98t96\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.112251 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-client-ca" (OuterVolumeSpecName: "client-ca") pod "20b71f11-1d5d-4b2a-b324-ce50191fc700" (UID: "20b71f11-1d5d-4b2a-b324-ce50191fc700"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.112234 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "20b71f11-1d5d-4b2a-b324-ce50191fc700" (UID: "20b71f11-1d5d-4b2a-b324-ce50191fc700"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.112379 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8997c3f1-5a72-46f1-a610-819f5a89eb13-client-ca" (OuterVolumeSpecName: "client-ca") pod "8997c3f1-5a72-46f1-a610-819f5a89eb13" (UID: "8997c3f1-5a72-46f1-a610-819f5a89eb13"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.112776 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8997c3f1-5a72-46f1-a610-819f5a89eb13-config" (OuterVolumeSpecName: "config") pod "8997c3f1-5a72-46f1-a610-819f5a89eb13" (UID: "8997c3f1-5a72-46f1-a610-819f5a89eb13"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.113344 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-config" (OuterVolumeSpecName: "config") pod "20b71f11-1d5d-4b2a-b324-ce50191fc700" (UID: "20b71f11-1d5d-4b2a-b324-ce50191fc700"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.117278 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8997c3f1-5a72-46f1-a610-819f5a89eb13-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8997c3f1-5a72-46f1-a610-819f5a89eb13" (UID: "8997c3f1-5a72-46f1-a610-819f5a89eb13"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.117423 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b71f11-1d5d-4b2a-b324-ce50191fc700-kube-api-access-phzqv" (OuterVolumeSpecName: "kube-api-access-phzqv") pod "20b71f11-1d5d-4b2a-b324-ce50191fc700" (UID: "20b71f11-1d5d-4b2a-b324-ce50191fc700"). InnerVolumeSpecName "kube-api-access-phzqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.117534 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b71f11-1d5d-4b2a-b324-ce50191fc700-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "20b71f11-1d5d-4b2a-b324-ce50191fc700" (UID: "20b71f11-1d5d-4b2a-b324-ce50191fc700"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.118379 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8997c3f1-5a72-46f1-a610-819f5a89eb13-kube-api-access-fvhmt" (OuterVolumeSpecName: "kube-api-access-fvhmt") pod "8997c3f1-5a72-46f1-a610-819f5a89eb13" (UID: "8997c3f1-5a72-46f1-a610-819f5a89eb13"). InnerVolumeSpecName "kube-api-access-fvhmt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.213360 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbd16799-2195-4cd1-9794-662c7ac6acec-config\") pod \"route-controller-manager-69c79dd4cc-98t96\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.213464 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dbd16799-2195-4cd1-9794-662c7ac6acec-client-ca\") pod \"route-controller-manager-69c79dd4cc-98t96\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.213492 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfhwr\" (UniqueName: \"kubernetes.io/projected/dbd16799-2195-4cd1-9794-662c7ac6acec-kube-api-access-bfhwr\") pod \"route-controller-manager-69c79dd4cc-98t96\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.213544 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbd16799-2195-4cd1-9794-662c7ac6acec-serving-cert\") pod \"route-controller-manager-69c79dd4cc-98t96\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.214112 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20b71f11-1d5d-4b2a-b324-ce50191fc700-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.214615 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phzqv\" (UniqueName: \"kubernetes.io/projected/20b71f11-1d5d-4b2a-b324-ce50191fc700-kube-api-access-phzqv\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.214643 5016 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.214636 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dbd16799-2195-4cd1-9794-662c7ac6acec-client-ca\") pod \"route-controller-manager-69c79dd4cc-98t96\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.214660 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8997c3f1-5a72-46f1-a610-819f5a89eb13-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.214724 5016 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-client-ca\") on node \"crc\" DevicePath \"\"" 
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.214742 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvhmt\" (UniqueName: \"kubernetes.io/projected/8997c3f1-5a72-46f1-a610-819f5a89eb13-kube-api-access-fvhmt\") on node \"crc\" DevicePath \"\""
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.214759 5016 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8997c3f1-5a72-46f1-a610-819f5a89eb13-client-ca\") on node \"crc\" DevicePath \"\""
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.214773 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8997c3f1-5a72-46f1-a610-819f5a89eb13-config\") on node \"crc\" DevicePath \"\""
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.214788 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b71f11-1d5d-4b2a-b324-ce50191fc700-config\") on node \"crc\" DevicePath \"\""
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.215482 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbd16799-2195-4cd1-9794-662c7ac6acec-config\") pod \"route-controller-manager-69c79dd4cc-98t96\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96"
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.217830 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbd16799-2195-4cd1-9794-662c7ac6acec-serving-cert\") pod \"route-controller-manager-69c79dd4cc-98t96\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96"
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.233662 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfhwr\" (UniqueName: \"kubernetes.io/projected/dbd16799-2195-4cd1-9794-662c7ac6acec-kube-api-access-bfhwr\") pod \"route-controller-manager-69c79dd4cc-98t96\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96"
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.314558 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96"
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.513174 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96"]
Dec 11 10:41:33 crc kubenswrapper[5016]: W1211 10:41:33.513536 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbd16799_2195_4cd1_9794_662c7ac6acec.slice/crio-a29a45c12a785846fb3014e155d54c58f56d3acacaa0fc681dbf1e1380682ce5 WatchSource:0}: Error finding container a29a45c12a785846fb3014e155d54c58f56d3acacaa0fc681dbf1e1380682ce5: Status 404 returned error can't find the container with id a29a45c12a785846fb3014e155d54c58f56d3acacaa0fc681dbf1e1380682ce5
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.591970 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" event={"ID":"dbd16799-2195-4cd1-9794-662c7ac6acec","Type":"ContainerStarted","Data":"a29a45c12a785846fb3014e155d54c58f56d3acacaa0fc681dbf1e1380682ce5"}
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.593810 5016 generic.go:334] "Generic (PLEG): container finished" podID="20b71f11-1d5d-4b2a-b324-ce50191fc700" containerID="9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62" exitCode=0
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.593854 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh"
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.593912 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" event={"ID":"20b71f11-1d5d-4b2a-b324-ce50191fc700","Type":"ContainerDied","Data":"9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62"}
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.594049 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85bb984995-7kcnh" event={"ID":"20b71f11-1d5d-4b2a-b324-ce50191fc700","Type":"ContainerDied","Data":"e53764d2f25092af76ed5c1b6552a516e0ae668748cb126233f2731ea697c2d7"}
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.594075 5016 scope.go:117] "RemoveContainer" containerID="9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62"
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.596245 5016 generic.go:334] "Generic (PLEG): container finished" podID="8997c3f1-5a72-46f1-a610-819f5a89eb13" containerID="6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351" exitCode=0
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.596284 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" event={"ID":"8997c3f1-5a72-46f1-a610-819f5a89eb13","Type":"ContainerDied","Data":"6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351"}
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.596501 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k" event={"ID":"8997c3f1-5a72-46f1-a610-819f5a89eb13","Type":"ContainerDied","Data":"dbd0eb26ecd0c500fb52de5d95df832481ef5482bff63fff1e8022ebf479e401"}
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.596560 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k"
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.612957 5016 scope.go:117] "RemoveContainer" containerID="9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62"
Dec 11 10:41:33 crc kubenswrapper[5016]: E1211 10:41:33.613379 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62\": container with ID starting with 9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62 not found: ID does not exist" containerID="9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62"
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.613415 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62"} err="failed to get container status \"9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62\": rpc error: code = NotFound desc = could not find container \"9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62\": container with ID starting with 9fc45dbc01f48113e84b64f1df2a3c70791a949137ad4648cf8d00efda81ca62 not found: ID does not exist"
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.613443 5016 scope.go:117] "RemoveContainer" containerID="6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351"
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.642448 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-85bb984995-7kcnh"]
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.649261 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-85bb984995-7kcnh"]
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.650249 5016 scope.go:117] "RemoveContainer" containerID="6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351"
Dec 11 10:41:33 crc kubenswrapper[5016]: E1211 10:41:33.650641 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351\": container with ID starting with 6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351 not found: ID does not exist" containerID="6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351"
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.650692 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351"} err="failed to get container status \"6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351\": rpc error: code = NotFound desc = could not find container \"6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351\": container with ID starting with 6b13130d079da98d238ee2f9772e27503c0d6bb98dfb2745de710c4740f1d351 not found: ID does not exist"
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.652973 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k"]
Dec 11 10:41:33 crc kubenswrapper[5016]: I1211 10:41:33.658918 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f5c5bf969-22x7k"]
Dec 11 10:41:34 crc kubenswrapper[5016]: I1211 10:41:34.605919 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" event={"ID":"dbd16799-2195-4cd1-9794-662c7ac6acec","Type":"ContainerStarted","Data":"bb720ab41bc35bbbbceb6f018e54fea64f144a42c77c268d6720ac4331de511b"}
Dec 11 10:41:34 crc kubenswrapper[5016]: I1211 10:41:34.606285 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96"
Dec 11 10:41:34 crc kubenswrapper[5016]: I1211 10:41:34.610757 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96"
Dec 11 10:41:34 crc kubenswrapper[5016]: I1211 10:41:34.624730 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" podStartSLOduration=3.624709532 podStartE2EDuration="3.624709532s" podCreationTimestamp="2025-12-11 10:41:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:41:34.624400094 +0000 UTC m=+411.442959683" watchObservedRunningTime="2025-12-11 10:41:34.624709532 +0000 UTC m=+411.443269101"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.080027 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5db558bd57-lsm65"]
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.080888 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.086368 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.086576 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.086601 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.086748 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.091837 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.092368 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.097610 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5db558bd57-lsm65"]
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.102155 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.243516 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-config\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.243589 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zg84f\" (UniqueName: \"kubernetes.io/projected/8f994c93-d45f-48da-bb25-c0038b8fcc05-kube-api-access-zg84f\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.243735 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-proxy-ca-bundles\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.243765 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f994c93-d45f-48da-bb25-c0038b8fcc05-serving-cert\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.243817 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-client-ca\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.345670 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-proxy-ca-bundles\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.345764 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f994c93-d45f-48da-bb25-c0038b8fcc05-serving-cert\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.345811 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-client-ca\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.345851 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-config\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.345890 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zg84f\" (UniqueName: \"kubernetes.io/projected/8f994c93-d45f-48da-bb25-c0038b8fcc05-kube-api-access-zg84f\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.347342 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-client-ca\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.347855 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-config\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.348192 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-proxy-ca-bundles\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.353014 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f994c93-d45f-48da-bb25-c0038b8fcc05-serving-cert\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.368962 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zg84f\" (UniqueName: \"kubernetes.io/projected/8f994c93-d45f-48da-bb25-c0038b8fcc05-kube-api-access-zg84f\") pod \"controller-manager-5db558bd57-lsm65\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") " pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.402311 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.482645 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b71f11-1d5d-4b2a-b324-ce50191fc700" path="/var/lib/kubelet/pods/20b71f11-1d5d-4b2a-b324-ce50191fc700/volumes"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.483748 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8997c3f1-5a72-46f1-a610-819f5a89eb13" path="/var/lib/kubelet/pods/8997c3f1-5a72-46f1-a610-819f5a89eb13/volumes"
Dec 11 10:41:35 crc kubenswrapper[5016]: I1211 10:41:35.770706 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5db558bd57-lsm65"]
Dec 11 10:41:35 crc kubenswrapper[5016]: W1211 10:41:35.775048 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f994c93_d45f_48da_bb25_c0038b8fcc05.slice/crio-9eb18bb4ae73057f804e3b003afe998b116c45036e7686822484f27bca26edb4 WatchSource:0}: Error finding container 9eb18bb4ae73057f804e3b003afe998b116c45036e7686822484f27bca26edb4: Status 404 returned error can't find the container with id 9eb18bb4ae73057f804e3b003afe998b116c45036e7686822484f27bca26edb4
Dec 11 10:41:36 crc kubenswrapper[5016]: I1211 10:41:36.621557 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65" event={"ID":"8f994c93-d45f-48da-bb25-c0038b8fcc05","Type":"ContainerStarted","Data":"531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0"}
Dec 11 10:41:36 crc kubenswrapper[5016]: I1211 10:41:36.621974 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:36 crc kubenswrapper[5016]: I1211 10:41:36.621997 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65" event={"ID":"8f994c93-d45f-48da-bb25-c0038b8fcc05","Type":"ContainerStarted","Data":"9eb18bb4ae73057f804e3b003afe998b116c45036e7686822484f27bca26edb4"}
Dec 11 10:41:36 crc kubenswrapper[5016]: I1211 10:41:36.626356 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:41:36 crc kubenswrapper[5016]: I1211 10:41:36.637340 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65" podStartSLOduration=5.637320572 podStartE2EDuration="5.637320572s" podCreationTimestamp="2025-12-11 10:41:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:41:36.636225965 +0000 UTC m=+413.454785554" watchObservedRunningTime="2025-12-11 10:41:36.637320572 +0000 UTC m=+413.455880161"
Dec 11 10:41:42 crc kubenswrapper[5016]: I1211 10:41:42.933432 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:41:42 crc kubenswrapper[5016]: I1211 10:41:42.935731 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.353746 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r5rgf"]
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.354811 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-r5rgf" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" containerName="registry-server" containerID="cri-o://4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6" gracePeriod=2
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.553416 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fc9qn"]
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.553643 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fc9qn" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" containerName="registry-server" containerID="cri-o://a292df12190a62073c2372ceb6f52804000afd75f5a1a64e492f7c67a12d73d6" gracePeriod=2
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.706054 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r5rgf"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.766496 5016 generic.go:334] "Generic (PLEG): container finished" podID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" containerID="a292df12190a62073c2372ceb6f52804000afd75f5a1a64e492f7c67a12d73d6" exitCode=0
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.766580 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc9qn" event={"ID":"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4","Type":"ContainerDied","Data":"a292df12190a62073c2372ceb6f52804000afd75f5a1a64e492f7c67a12d73d6"}
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.770558 5016 generic.go:334] "Generic (PLEG): container finished" podID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" containerID="4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6" exitCode=0
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.770621 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r5rgf" event={"ID":"623ddc04-83e2-42ac-bcac-59b72d2fac2a","Type":"ContainerDied","Data":"4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6"}
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.770661 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r5rgf" event={"ID":"623ddc04-83e2-42ac-bcac-59b72d2fac2a","Type":"ContainerDied","Data":"927c291325ed36a7a29e3781145edeb3363d491148d73f8f1b4aae3e43303023"}
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.770683 5016 scope.go:117] "RemoveContainer" containerID="4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.770706 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r5rgf"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.792489 5016 scope.go:117] "RemoveContainer" containerID="733df02a42dee187fdc183518fac16020eb95499f4b1cfc4062760e56d363f32"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.813068 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctk8w\" (UniqueName: \"kubernetes.io/projected/623ddc04-83e2-42ac-bcac-59b72d2fac2a-kube-api-access-ctk8w\") pod \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\" (UID: \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\") "
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.813291 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/623ddc04-83e2-42ac-bcac-59b72d2fac2a-utilities\") pod \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\" (UID: \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\") "
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.813411 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/623ddc04-83e2-42ac-bcac-59b72d2fac2a-catalog-content\") pod \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\" (UID: \"623ddc04-83e2-42ac-bcac-59b72d2fac2a\") "
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.814814 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/623ddc04-83e2-42ac-bcac-59b72d2fac2a-utilities" (OuterVolumeSpecName: "utilities") pod "623ddc04-83e2-42ac-bcac-59b72d2fac2a" (UID: "623ddc04-83e2-42ac-bcac-59b72d2fac2a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.816689 5016 scope.go:117] "RemoveContainer" containerID="6d835a48c6d414c9097a434358701ace6f08f0bc9d08c01c978ed8c4201befb3"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.820033 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/623ddc04-83e2-42ac-bcac-59b72d2fac2a-kube-api-access-ctk8w" (OuterVolumeSpecName: "kube-api-access-ctk8w") pod "623ddc04-83e2-42ac-bcac-59b72d2fac2a" (UID: "623ddc04-83e2-42ac-bcac-59b72d2fac2a"). InnerVolumeSpecName "kube-api-access-ctk8w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.850825 5016 scope.go:117] "RemoveContainer" containerID="4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6"
Dec 11 10:41:58 crc kubenswrapper[5016]: E1211 10:41:58.852349 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6\": container with ID starting with 4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6 not found: ID does not exist" containerID="4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.852375 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6"} err="failed to get container status \"4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6\": rpc error: code = NotFound desc = could not find container \"4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6\": container with ID starting with 4a8521de0c5a4ff41a50cb637134281a05894ea4ea87bd99003a27dfacecffd6 not found: ID does not exist"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.852397 5016 scope.go:117] "RemoveContainer" containerID="733df02a42dee187fdc183518fac16020eb95499f4b1cfc4062760e56d363f32"
Dec 11 10:41:58 crc kubenswrapper[5016]: E1211 10:41:58.852919 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"733df02a42dee187fdc183518fac16020eb95499f4b1cfc4062760e56d363f32\": container with ID starting with 733df02a42dee187fdc183518fac16020eb95499f4b1cfc4062760e56d363f32 not found: ID does not exist" containerID="733df02a42dee187fdc183518fac16020eb95499f4b1cfc4062760e56d363f32"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.852946 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"733df02a42dee187fdc183518fac16020eb95499f4b1cfc4062760e56d363f32"} err="failed to get container status \"733df02a42dee187fdc183518fac16020eb95499f4b1cfc4062760e56d363f32\": rpc error: code = NotFound desc = could not find container \"733df02a42dee187fdc183518fac16020eb95499f4b1cfc4062760e56d363f32\": container with ID starting with 733df02a42dee187fdc183518fac16020eb95499f4b1cfc4062760e56d363f32 not found: ID does not exist"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.852983 5016 scope.go:117] "RemoveContainer" containerID="6d835a48c6d414c9097a434358701ace6f08f0bc9d08c01c978ed8c4201befb3"
Dec 11 10:41:58 crc kubenswrapper[5016]: E1211 10:41:58.853256 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d835a48c6d414c9097a434358701ace6f08f0bc9d08c01c978ed8c4201befb3\": container with ID starting with 6d835a48c6d414c9097a434358701ace6f08f0bc9d08c01c978ed8c4201befb3 not found: ID does not exist" containerID="6d835a48c6d414c9097a434358701ace6f08f0bc9d08c01c978ed8c4201befb3"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.853293 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d835a48c6d414c9097a434358701ace6f08f0bc9d08c01c978ed8c4201befb3"} err="failed to get container status \"6d835a48c6d414c9097a434358701ace6f08f0bc9d08c01c978ed8c4201befb3\": rpc error: code = NotFound desc = could not find container \"6d835a48c6d414c9097a434358701ace6f08f0bc9d08c01c978ed8c4201befb3\": container with ID starting with 6d835a48c6d414c9097a434358701ace6f08f0bc9d08c01c978ed8c4201befb3 not found: ID does not exist"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.859095 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fc9qn"
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.897754 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/623ddc04-83e2-42ac-bcac-59b72d2fac2a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "623ddc04-83e2-42ac-bcac-59b72d2fac2a" (UID: "623ddc04-83e2-42ac-bcac-59b72d2fac2a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.914416 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-utilities\") pod \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\" (UID: \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\") "
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.914478 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7m5zn\" (UniqueName: \"kubernetes.io/projected/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-kube-api-access-7m5zn\") pod \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\" (UID: \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\") "
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.914550 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-catalog-content\") pod \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\" (UID: \"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4\") "
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.914822 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/623ddc04-83e2-42ac-bcac-59b72d2fac2a-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.914834 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctk8w\" (UniqueName: \"kubernetes.io/projected/623ddc04-83e2-42ac-bcac-59b72d2fac2a-kube-api-access-ctk8w\") on node \"crc\" DevicePath \"\""
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.914846 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/623ddc04-83e2-42ac-bcac-59b72d2fac2a-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.918523 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-kube-api-access-7m5zn" (OuterVolumeSpecName: "kube-api-access-7m5zn") pod "8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" (UID: "8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4"). InnerVolumeSpecName "kube-api-access-7m5zn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.921169 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-utilities" (OuterVolumeSpecName: "utilities") pod "8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" (UID: "8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:41:58 crc kubenswrapper[5016]: I1211 10:41:58.971707 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" (UID: "8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.015724 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.015760 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7m5zn\" (UniqueName: \"kubernetes.io/projected/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-kube-api-access-7m5zn\") on node \"crc\" DevicePath \"\""
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.015770 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.101520 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-r5rgf"]
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.106076 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-r5rgf"]
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.483806 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" path="/var/lib/kubelet/pods/623ddc04-83e2-42ac-bcac-59b72d2fac2a/volumes"
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.780361 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc9qn" event={"ID":"8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4","Type":"ContainerDied","Data":"fce4f97e5cfa2867c9b7f8677c1d308cafd11e58a1bad0e63c79d1d3bea75cbf"}
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.780418 5016 scope.go:117] "RemoveContainer" containerID="a292df12190a62073c2372ceb6f52804000afd75f5a1a64e492f7c67a12d73d6"
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.780587 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fc9qn"
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.801222 5016 scope.go:117] "RemoveContainer" containerID="b431768e4cd8d8a61fe495ce85d6971147adc545649c29e350f2fa7ac028c6ad"
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.801424 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fc9qn"]
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.805360 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fc9qn"]
Dec 11 10:41:59 crc kubenswrapper[5016]: I1211 10:41:59.822195 5016 scope.go:117] "RemoveContainer" containerID="bc00277dce91936233ab5a9a197e44358d060fb7dfca16dce668703e745492a5"
Dec 11 10:42:00 crc kubenswrapper[5016]: I1211 10:42:00.670027 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5db558bd57-lsm65"]
Dec 11 10:42:00 crc kubenswrapper[5016]: I1211 10:42:00.670338 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65" podUID="8f994c93-d45f-48da-bb25-c0038b8fcc05" containerName="controller-manager" containerID="cri-o://531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0" gracePeriod=30
Dec 11 10:42:00 crc kubenswrapper[5016]: I1211 10:42:00.755792 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vb25j"]
Dec 11 10:42:00 crc kubenswrapper[5016]: I1211 10:42:00.756111 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vb25j" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" containerName="registry-server" containerID="cri-o://1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53" gracePeriod=2
Dec 11 10:42:00 crc kubenswrapper[5016]: I1211 10:42:00.952652 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rzcjf"]
Dec 11 10:42:00 crc kubenswrapper[5016]: I1211 10:42:00.952973 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rzcjf" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" containerName="registry-server" containerID="cri-o://96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261" gracePeriod=2
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.127463 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vb25j"
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.241355 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwsjx\" (UniqueName: \"kubernetes.io/projected/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-kube-api-access-nwsjx\") pod \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\" (UID: \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\") "
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.241488 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-catalog-content\") pod \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\" (UID: \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\") "
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.241527 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-utilities\") pod \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\" (UID: \"edb91373-b8a5-4426-9a6b-1fbb6c9f2846\") "
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.242494 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-utilities" (OuterVolumeSpecName: "utilities") pod "edb91373-b8a5-4426-9a6b-1fbb6c9f2846" (UID: "edb91373-b8a5-4426-9a6b-1fbb6c9f2846"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.249349 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-kube-api-access-nwsjx" (OuterVolumeSpecName: "kube-api-access-nwsjx") pod "edb91373-b8a5-4426-9a6b-1fbb6c9f2846" (UID: "edb91373-b8a5-4426-9a6b-1fbb6c9f2846"). InnerVolumeSpecName "kube-api-access-nwsjx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.272371 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "edb91373-b8a5-4426-9a6b-1fbb6c9f2846" (UID: "edb91373-b8a5-4426-9a6b-1fbb6c9f2846"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.302975 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rzcjf"
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.347373 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwsjx\" (UniqueName: \"kubernetes.io/projected/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-kube-api-access-nwsjx\") on node \"crc\" DevicePath \"\""
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.347406 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.347417 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edb91373-b8a5-4426-9a6b-1fbb6c9f2846-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.448124 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89fda315-d1f2-484a-aa91-ec75f0b0227e-catalog-content\") pod \"89fda315-d1f2-484a-aa91-ec75f0b0227e\" (UID: \"89fda315-d1f2-484a-aa91-ec75f0b0227e\") "
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.448179 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqbf7\" (UniqueName: \"kubernetes.io/projected/89fda315-d1f2-484a-aa91-ec75f0b0227e-kube-api-access-xqbf7\") pod \"89fda315-d1f2-484a-aa91-ec75f0b0227e\" (UID: \"89fda315-d1f2-484a-aa91-ec75f0b0227e\") "
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.448285 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89fda315-d1f2-484a-aa91-ec75f0b0227e-utilities\") pod \"89fda315-d1f2-484a-aa91-ec75f0b0227e\" (UID: \"89fda315-d1f2-484a-aa91-ec75f0b0227e\") "
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.449228 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89fda315-d1f2-484a-aa91-ec75f0b0227e-utilities" (OuterVolumeSpecName: "utilities") pod "89fda315-d1f2-484a-aa91-ec75f0b0227e" (UID: "89fda315-d1f2-484a-aa91-ec75f0b0227e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.451607 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89fda315-d1f2-484a-aa91-ec75f0b0227e-kube-api-access-xqbf7" (OuterVolumeSpecName: "kube-api-access-xqbf7") pod "89fda315-d1f2-484a-aa91-ec75f0b0227e" (UID: "89fda315-d1f2-484a-aa91-ec75f0b0227e"). InnerVolumeSpecName "kube-api-access-xqbf7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.476712 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65"
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.480455 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" path="/var/lib/kubelet/pods/8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4/volumes"
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.550366 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-proxy-ca-bundles\") pod \"8f994c93-d45f-48da-bb25-c0038b8fcc05\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") "
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.550451 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-client-ca\") pod \"8f994c93-d45f-48da-bb25-c0038b8fcc05\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") "
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.550568 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zg84f\" (UniqueName: \"kubernetes.io/projected/8f994c93-d45f-48da-bb25-c0038b8fcc05-kube-api-access-zg84f\") pod \"8f994c93-d45f-48da-bb25-c0038b8fcc05\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") "
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.550609 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f994c93-d45f-48da-bb25-c0038b8fcc05-serving-cert\") pod \"8f994c93-d45f-48da-bb25-c0038b8fcc05\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") "
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.550647 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-config\") pod \"8f994c93-d45f-48da-bb25-c0038b8fcc05\" (UID: \"8f994c93-d45f-48da-bb25-c0038b8fcc05\") "
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.551164 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqbf7\" (UniqueName: \"kubernetes.io/projected/89fda315-d1f2-484a-aa91-ec75f0b0227e-kube-api-access-xqbf7\") on node \"crc\" DevicePath \"\""
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.551189 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89fda315-d1f2-484a-aa91-ec75f0b0227e-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.552008 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-config" (OuterVolumeSpecName: "config") pod "8f994c93-d45f-48da-bb25-c0038b8fcc05" (UID: "8f994c93-d45f-48da-bb25-c0038b8fcc05"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.552155 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "8f994c93-d45f-48da-bb25-c0038b8fcc05" (UID: "8f994c93-d45f-48da-bb25-c0038b8fcc05"). InnerVolumeSpecName "proxy-ca-bundles".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.552695 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-client-ca" (OuterVolumeSpecName: "client-ca") pod "8f994c93-d45f-48da-bb25-c0038b8fcc05" (UID: "8f994c93-d45f-48da-bb25-c0038b8fcc05"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.554023 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f994c93-d45f-48da-bb25-c0038b8fcc05-kube-api-access-zg84f" (OuterVolumeSpecName: "kube-api-access-zg84f") pod "8f994c93-d45f-48da-bb25-c0038b8fcc05" (UID: "8f994c93-d45f-48da-bb25-c0038b8fcc05"). InnerVolumeSpecName "kube-api-access-zg84f". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.554289 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f994c93-d45f-48da-bb25-c0038b8fcc05-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8f994c93-d45f-48da-bb25-c0038b8fcc05" (UID: "8f994c93-d45f-48da-bb25-c0038b8fcc05"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.566709 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89fda315-d1f2-484a-aa91-ec75f0b0227e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "89fda315-d1f2-484a-aa91-ec75f0b0227e" (UID: "89fda315-d1f2-484a-aa91-ec75f0b0227e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.653067 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89fda315-d1f2-484a-aa91-ec75f0b0227e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.654547 5016 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.654593 5016 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.654606 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zg84f\" (UniqueName: \"kubernetes.io/projected/8f994c93-d45f-48da-bb25-c0038b8fcc05-kube-api-access-zg84f\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.654621 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f994c93-d45f-48da-bb25-c0038b8fcc05-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.654632 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f994c93-d45f-48da-bb25-c0038b8fcc05-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.798099 5016 generic.go:334] "Generic (PLEG): container finished" 
podID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" containerID="1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53" exitCode=0 Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.798376 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vb25j" event={"ID":"edb91373-b8a5-4426-9a6b-1fbb6c9f2846","Type":"ContainerDied","Data":"1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53"} Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.798433 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vb25j" event={"ID":"edb91373-b8a5-4426-9a6b-1fbb6c9f2846","Type":"ContainerDied","Data":"f74cbaf4b2b284ec3a59de230d87650b678e7c27a1722e13358d539c64c19d8e"} Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.798454 5016 scope.go:117] "RemoveContainer" containerID="1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.799854 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vb25j" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.804071 5016 generic.go:334] "Generic (PLEG): container finished" podID="89fda315-d1f2-484a-aa91-ec75f0b0227e" containerID="96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261" exitCode=0 Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.804191 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcjf" event={"ID":"89fda315-d1f2-484a-aa91-ec75f0b0227e","Type":"ContainerDied","Data":"96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261"} Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.804223 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rzcjf" event={"ID":"89fda315-d1f2-484a-aa91-ec75f0b0227e","Type":"ContainerDied","Data":"d63b4a95dba1ff0eef97f235c276d3dbb696c39120318f9a03258442070a990c"} Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.804336 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rzcjf" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.810043 5016 generic.go:334] "Generic (PLEG): container finished" podID="8f994c93-d45f-48da-bb25-c0038b8fcc05" containerID="531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0" exitCode=0 Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.810095 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65" event={"ID":"8f994c93-d45f-48da-bb25-c0038b8fcc05","Type":"ContainerDied","Data":"531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0"} Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.810109 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.810128 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5db558bd57-lsm65" event={"ID":"8f994c93-d45f-48da-bb25-c0038b8fcc05","Type":"ContainerDied","Data":"9eb18bb4ae73057f804e3b003afe998b116c45036e7686822484f27bca26edb4"} Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.825563 5016 scope.go:117] "RemoveContainer" containerID="ccfdfec75c4e77a73d5caab4411aa800938e0d4074f1dfff044d49a9b74b4566" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.832084 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vb25j"] Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.836448 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vb25j"] Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.851871 5016 scope.go:117] "RemoveContainer" containerID="f2bd2d9b047ad76d526e427054224496aa8abb4a09d50c5b645da64c4c8f3410" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.852081 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rzcjf"] Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.854360 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rzcjf"] Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.878150 5016 scope.go:117] "RemoveContainer" containerID="1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53" Dec 11 10:42:01 crc kubenswrapper[5016]: E1211 10:42:01.878764 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53\": container with ID starting with 1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53 not found: ID does not exist" containerID="1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.878870 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53"} err="failed to get container status \"1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53\": rpc error: code = NotFound desc = could not find container \"1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53\": container with ID starting with 1af2f272104ca1cabfe0d8668cba758ccb83db3dbfbad9802a3df2e44d437f53 not found: ID does not exist" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.878979 5016 scope.go:117] "RemoveContainer" containerID="ccfdfec75c4e77a73d5caab4411aa800938e0d4074f1dfff044d49a9b74b4566" Dec 11 10:42:01 crc kubenswrapper[5016]: E1211 10:42:01.879292 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ccfdfec75c4e77a73d5caab4411aa800938e0d4074f1dfff044d49a9b74b4566\": container with ID starting with ccfdfec75c4e77a73d5caab4411aa800938e0d4074f1dfff044d49a9b74b4566 not found: ID does not exist" containerID="ccfdfec75c4e77a73d5caab4411aa800938e0d4074f1dfff044d49a9b74b4566" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.879390 5016 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"ccfdfec75c4e77a73d5caab4411aa800938e0d4074f1dfff044d49a9b74b4566"} err="failed to get container status \"ccfdfec75c4e77a73d5caab4411aa800938e0d4074f1dfff044d49a9b74b4566\": rpc error: code = NotFound desc = could not find container \"ccfdfec75c4e77a73d5caab4411aa800938e0d4074f1dfff044d49a9b74b4566\": container with ID starting with ccfdfec75c4e77a73d5caab4411aa800938e0d4074f1dfff044d49a9b74b4566 not found: ID does not exist" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.879457 5016 scope.go:117] "RemoveContainer" containerID="f2bd2d9b047ad76d526e427054224496aa8abb4a09d50c5b645da64c4c8f3410" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.879572 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5db558bd57-lsm65"] Dec 11 10:42:01 crc kubenswrapper[5016]: E1211 10:42:01.879740 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2bd2d9b047ad76d526e427054224496aa8abb4a09d50c5b645da64c4c8f3410\": container with ID starting with f2bd2d9b047ad76d526e427054224496aa8abb4a09d50c5b645da64c4c8f3410 not found: ID does not exist" containerID="f2bd2d9b047ad76d526e427054224496aa8abb4a09d50c5b645da64c4c8f3410" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.879860 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2bd2d9b047ad76d526e427054224496aa8abb4a09d50c5b645da64c4c8f3410"} err="failed to get container status \"f2bd2d9b047ad76d526e427054224496aa8abb4a09d50c5b645da64c4c8f3410\": rpc error: code = NotFound desc = could not find container \"f2bd2d9b047ad76d526e427054224496aa8abb4a09d50c5b645da64c4c8f3410\": container with ID starting with f2bd2d9b047ad76d526e427054224496aa8abb4a09d50c5b645da64c4c8f3410 not found: ID does not exist" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.879945 5016 scope.go:117] "RemoveContainer" containerID="96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.883239 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5db558bd57-lsm65"] Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.894449 5016 scope.go:117] "RemoveContainer" containerID="1aa80eb5a334fb04c2d4caad344fa5bc35a2f4815cd89e472726de20a9f109dc" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.909918 5016 scope.go:117] "RemoveContainer" containerID="8ada3a6b0f9bf46f5715860770521b5f303a501d0e0a332446f52bc00a63d9d4" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.926968 5016 scope.go:117] "RemoveContainer" containerID="96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261" Dec 11 10:42:01 crc kubenswrapper[5016]: E1211 10:42:01.927596 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261\": container with ID starting with 96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261 not found: ID does not exist" containerID="96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.927667 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261"} err="failed to get container status 
\"96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261\": rpc error: code = NotFound desc = could not find container \"96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261\": container with ID starting with 96fe4091526d60f6eef67e97e4a5630f6418e0066b556329a27141af8d67b261 not found: ID does not exist" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.927718 5016 scope.go:117] "RemoveContainer" containerID="1aa80eb5a334fb04c2d4caad344fa5bc35a2f4815cd89e472726de20a9f109dc" Dec 11 10:42:01 crc kubenswrapper[5016]: E1211 10:42:01.928293 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1aa80eb5a334fb04c2d4caad344fa5bc35a2f4815cd89e472726de20a9f109dc\": container with ID starting with 1aa80eb5a334fb04c2d4caad344fa5bc35a2f4815cd89e472726de20a9f109dc not found: ID does not exist" containerID="1aa80eb5a334fb04c2d4caad344fa5bc35a2f4815cd89e472726de20a9f109dc" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.928350 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1aa80eb5a334fb04c2d4caad344fa5bc35a2f4815cd89e472726de20a9f109dc"} err="failed to get container status \"1aa80eb5a334fb04c2d4caad344fa5bc35a2f4815cd89e472726de20a9f109dc\": rpc error: code = NotFound desc = could not find container \"1aa80eb5a334fb04c2d4caad344fa5bc35a2f4815cd89e472726de20a9f109dc\": container with ID starting with 1aa80eb5a334fb04c2d4caad344fa5bc35a2f4815cd89e472726de20a9f109dc not found: ID does not exist" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.928387 5016 scope.go:117] "RemoveContainer" containerID="8ada3a6b0f9bf46f5715860770521b5f303a501d0e0a332446f52bc00a63d9d4" Dec 11 10:42:01 crc kubenswrapper[5016]: E1211 10:42:01.928798 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ada3a6b0f9bf46f5715860770521b5f303a501d0e0a332446f52bc00a63d9d4\": container with ID starting with 8ada3a6b0f9bf46f5715860770521b5f303a501d0e0a332446f52bc00a63d9d4 not found: ID does not exist" containerID="8ada3a6b0f9bf46f5715860770521b5f303a501d0e0a332446f52bc00a63d9d4" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.928969 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ada3a6b0f9bf46f5715860770521b5f303a501d0e0a332446f52bc00a63d9d4"} err="failed to get container status \"8ada3a6b0f9bf46f5715860770521b5f303a501d0e0a332446f52bc00a63d9d4\": rpc error: code = NotFound desc = could not find container \"8ada3a6b0f9bf46f5715860770521b5f303a501d0e0a332446f52bc00a63d9d4\": container with ID starting with 8ada3a6b0f9bf46f5715860770521b5f303a501d0e0a332446f52bc00a63d9d4 not found: ID does not exist" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.929113 5016 scope.go:117] "RemoveContainer" containerID="531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.947556 5016 scope.go:117] "RemoveContainer" containerID="531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0" Dec 11 10:42:01 crc kubenswrapper[5016]: E1211 10:42:01.948118 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0\": container with ID starting with 531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0 not found: ID does not exist" 
containerID="531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0" Dec 11 10:42:01 crc kubenswrapper[5016]: I1211 10:42:01.948161 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0"} err="failed to get container status \"531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0\": rpc error: code = NotFound desc = could not find container \"531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0\": container with ID starting with 531f2c9f90c6105cb5e20d23bcfdd6b73bc86fcb01a77bcde966d40a0e7499b0 not found: ID does not exist" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105505 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-85bb984995-4dzx6"] Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105747 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" containerName="extract-content" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105760 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" containerName="extract-content" Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105768 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" containerName="extract-utilities" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105775 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" containerName="extract-utilities" Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105784 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" containerName="registry-server" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105791 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" containerName="registry-server" Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105801 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" containerName="extract-utilities" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105807 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" containerName="extract-utilities" Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105819 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f994c93-d45f-48da-bb25-c0038b8fcc05" containerName="controller-manager" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105825 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f994c93-d45f-48da-bb25-c0038b8fcc05" containerName="controller-manager" Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105836 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" containerName="extract-content" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105843 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" containerName="extract-content" Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105854 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" containerName="registry-server" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105861 5016 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" containerName="registry-server" Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105872 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" containerName="extract-utilities" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105879 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" containerName="extract-utilities" Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105890 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" containerName="registry-server" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105896 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" containerName="registry-server" Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105908 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" containerName="extract-utilities" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105914 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" containerName="extract-utilities" Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105922 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" containerName="extract-content" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105929 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" containerName="extract-content" Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105944 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" containerName="extract-content" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105970 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" containerName="extract-content" Dec 11 10:42:02 crc kubenswrapper[5016]: E1211 10:42:02.105979 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" containerName="registry-server" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.105986 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" containerName="registry-server" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.106124 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" containerName="registry-server" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.106137 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="623ddc04-83e2-42ac-bcac-59b72d2fac2a" containerName="registry-server" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.106149 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" containerName="registry-server" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.106165 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f994c93-d45f-48da-bb25-c0038b8fcc05" containerName="controller-manager" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.106175 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c302ed6-e9da-46bb-aec8-e0e0e4c1dae4" containerName="registry-server" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 
10:42:02.106655 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.109149 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.109414 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.110792 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.110831 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.110833 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.113592 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.117732 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.124605 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-85bb984995-4dzx6"] Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.161239 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/363094a9-901f-436c-b275-d14dd079c826-config\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.161284 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzxdk\" (UniqueName: \"kubernetes.io/projected/363094a9-901f-436c-b275-d14dd079c826-kube-api-access-pzxdk\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.161355 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/363094a9-901f-436c-b275-d14dd079c826-client-ca\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.161388 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/363094a9-901f-436c-b275-d14dd079c826-serving-cert\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.161449 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/363094a9-901f-436c-b275-d14dd079c826-proxy-ca-bundles\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.262997 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/363094a9-901f-436c-b275-d14dd079c826-proxy-ca-bundles\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.263058 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/363094a9-901f-436c-b275-d14dd079c826-config\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.263084 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzxdk\" (UniqueName: \"kubernetes.io/projected/363094a9-901f-436c-b275-d14dd079c826-kube-api-access-pzxdk\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.263117 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/363094a9-901f-436c-b275-d14dd079c826-client-ca\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.263142 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/363094a9-901f-436c-b275-d14dd079c826-serving-cert\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.266100 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/363094a9-901f-436c-b275-d14dd079c826-client-ca\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.266224 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/363094a9-901f-436c-b275-d14dd079c826-config\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.267227 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/363094a9-901f-436c-b275-d14dd079c826-proxy-ca-bundles\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " 
pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.280536 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/363094a9-901f-436c-b275-d14dd079c826-serving-cert\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.291794 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzxdk\" (UniqueName: \"kubernetes.io/projected/363094a9-901f-436c-b275-d14dd079c826-kube-api-access-pzxdk\") pod \"controller-manager-85bb984995-4dzx6\" (UID: \"363094a9-901f-436c-b275-d14dd079c826\") " pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.424303 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.648110 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-85bb984995-4dzx6"] Dec 11 10:42:02 crc kubenswrapper[5016]: I1211 10:42:02.823887 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" event={"ID":"363094a9-901f-436c-b275-d14dd079c826","Type":"ContainerStarted","Data":"74975c4947eba62edebd8f39da05c07fdd34c6032807cc87651b26d3edd932ef"} Dec 11 10:42:03 crc kubenswrapper[5016]: I1211 10:42:03.482448 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89fda315-d1f2-484a-aa91-ec75f0b0227e" path="/var/lib/kubelet/pods/89fda315-d1f2-484a-aa91-ec75f0b0227e/volumes" Dec 11 10:42:03 crc kubenswrapper[5016]: I1211 10:42:03.483668 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f994c93-d45f-48da-bb25-c0038b8fcc05" path="/var/lib/kubelet/pods/8f994c93-d45f-48da-bb25-c0038b8fcc05/volumes" Dec 11 10:42:03 crc kubenswrapper[5016]: I1211 10:42:03.484228 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edb91373-b8a5-4426-9a6b-1fbb6c9f2846" path="/var/lib/kubelet/pods/edb91373-b8a5-4426-9a6b-1fbb6c9f2846/volumes" Dec 11 10:42:03 crc kubenswrapper[5016]: I1211 10:42:03.848308 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" event={"ID":"363094a9-901f-436c-b275-d14dd079c826","Type":"ContainerStarted","Data":"44e999619ba2b7690a8d7af89f3e1ca98024d9d5f5bf9e589cc70a55416ec656"} Dec 11 10:42:03 crc kubenswrapper[5016]: I1211 10:42:03.849901 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:03 crc kubenswrapper[5016]: I1211 10:42:03.856060 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" Dec 11 10:42:03 crc kubenswrapper[5016]: I1211 10:42:03.870181 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-85bb984995-4dzx6" podStartSLOduration=3.870148351 podStartE2EDuration="3.870148351s" podCreationTimestamp="2025-12-11 10:42:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:42:03.865681082 +0000 UTC m=+440.684240721" watchObservedRunningTime="2025-12-11 10:42:03.870148351 +0000 UTC m=+440.688707950" Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.567895 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tp5lv"] Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.568874 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tp5lv" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" containerName="registry-server" containerID="cri-o://ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492" gracePeriod=30 Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.571902 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c6sdb"] Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.572277 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-c6sdb" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" containerName="registry-server" containerID="cri-o://45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e" gracePeriod=30 Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.580618 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kp5bk"] Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.581064 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" containerID="cri-o://0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38" gracePeriod=30 Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.592687 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hqmxw"] Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.593264 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hqmxw" podUID="f393088a-dacc-4673-8074-d6be25842a84" containerName="registry-server" containerID="cri-o://d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c" gracePeriod=30 Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.605798 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rx8bv"] Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.606087 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rx8bv" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" containerName="registry-server" containerID="cri-o://ce79c4c37bd26cf001092c437813df70aea50b475de9e346c630fe3376756330" gracePeriod=30 Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.617292 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fqzqf"] Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.622977 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.632625 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fqzqf"] Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.736973 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jt9s\" (UniqueName: \"kubernetes.io/projected/60a5c1c0-450b-4360-b6f5-7380a0a2db4f-kube-api-access-8jt9s\") pod \"marketplace-operator-79b997595-fqzqf\" (UID: \"60a5c1c0-450b-4360-b6f5-7380a0a2db4f\") " pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.737061 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/60a5c1c0-450b-4360-b6f5-7380a0a2db4f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fqzqf\" (UID: \"60a5c1c0-450b-4360-b6f5-7380a0a2db4f\") " pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.737088 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/60a5c1c0-450b-4360-b6f5-7380a0a2db4f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fqzqf\" (UID: \"60a5c1c0-450b-4360-b6f5-7380a0a2db4f\") " pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.838819 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jt9s\" (UniqueName: \"kubernetes.io/projected/60a5c1c0-450b-4360-b6f5-7380a0a2db4f-kube-api-access-8jt9s\") pod \"marketplace-operator-79b997595-fqzqf\" (UID: \"60a5c1c0-450b-4360-b6f5-7380a0a2db4f\") " pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.838891 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/60a5c1c0-450b-4360-b6f5-7380a0a2db4f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fqzqf\" (UID: \"60a5c1c0-450b-4360-b6f5-7380a0a2db4f\") " pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.838908 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/60a5c1c0-450b-4360-b6f5-7380a0a2db4f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fqzqf\" (UID: \"60a5c1c0-450b-4360-b6f5-7380a0a2db4f\") " pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.840203 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/60a5c1c0-450b-4360-b6f5-7380a0a2db4f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fqzqf\" (UID: \"60a5c1c0-450b-4360-b6f5-7380a0a2db4f\") " pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.845517 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/60a5c1c0-450b-4360-b6f5-7380a0a2db4f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fqzqf\" (UID: \"60a5c1c0-450b-4360-b6f5-7380a0a2db4f\") " pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.856793 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jt9s\" (UniqueName: \"kubernetes.io/projected/60a5c1c0-450b-4360-b6f5-7380a0a2db4f-kube-api-access-8jt9s\") pod \"marketplace-operator-79b997595-fqzqf\" (UID: \"60a5c1c0-450b-4360-b6f5-7380a0a2db4f\") " pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:07 crc kubenswrapper[5016]: E1211 10:42:07.918231 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c is running failed: container process not found" containerID="d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 10:42:07 crc kubenswrapper[5016]: E1211 10:42:07.918473 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c is running failed: container process not found" containerID="d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 10:42:07 crc kubenswrapper[5016]: E1211 10:42:07.918992 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c is running failed: container process not found" containerID="d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c" cmd=["grpc_health_probe","-addr=:50051"] Dec 11 10:42:07 crc kubenswrapper[5016]: E1211 10:42:07.919029 5016 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-hqmxw" podUID="f393088a-dacc-4673-8074-d6be25842a84" containerName="registry-server" Dec 11 10:42:07 crc kubenswrapper[5016]: I1211 10:42:07.944444 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:08 crc kubenswrapper[5016]: I1211 10:42:08.194786 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fqzqf"] Dec 11 10:42:08 crc kubenswrapper[5016]: W1211 10:42:08.209265 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod60a5c1c0_450b_4360_b6f5_7380a0a2db4f.slice/crio-677e50e10fd8ebaaaddbf1074592d9163c2421a7f68919aa9ed7b254b3c2ae84 WatchSource:0}: Error finding container 677e50e10fd8ebaaaddbf1074592d9163c2421a7f68919aa9ed7b254b3c2ae84: Status 404 returned error can't find the container with id 677e50e10fd8ebaaaddbf1074592d9163c2421a7f68919aa9ed7b254b3c2ae84 Dec 11 10:42:08 crc kubenswrapper[5016]: I1211 10:42:08.875270 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" event={"ID":"60a5c1c0-450b-4360-b6f5-7380a0a2db4f","Type":"ContainerStarted","Data":"677e50e10fd8ebaaaddbf1074592d9163c2421a7f68919aa9ed7b254b3c2ae84"} Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.786687 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.862082 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.865276 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-catalog-content\") pod \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\" (UID: \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.865325 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6n2x\" (UniqueName: \"kubernetes.io/projected/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-kube-api-access-g6n2x\") pod \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\" (UID: \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.865368 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-utilities\") pod \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\" (UID: \"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.866579 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-utilities" (OuterVolumeSpecName: "utilities") pod "9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" (UID: "9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.867844 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/3.log" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.867924 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.871439 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-kube-api-access-g6n2x" (OuterVolumeSpecName: "kube-api-access-g6n2x") pod "9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" (UID: "9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7"). InnerVolumeSpecName "kube-api-access-g6n2x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.890167 5016 generic.go:334] "Generic (PLEG): container finished" podID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" containerID="45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e" exitCode=0 Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.890245 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c6sdb" event={"ID":"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7","Type":"ContainerDied","Data":"45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e"} Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.890274 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c6sdb" event={"ID":"9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7","Type":"ContainerDied","Data":"4a93dbeda8cea431290536d59a21661e49598f9b7708a5e611ae319bf2757f73"} Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.890294 5016 scope.go:117] "RemoveContainer" containerID="45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.890428 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c6sdb" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.904895 5016 generic.go:334] "Generic (PLEG): container finished" podID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" containerID="ce79c4c37bd26cf001092c437813df70aea50b475de9e346c630fe3376756330" exitCode=0 Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.905071 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rx8bv" event={"ID":"2f07c0be-3ff2-4b4a-86f1-67da5394f101","Type":"ContainerDied","Data":"ce79c4c37bd26cf001092c437813df70aea50b475de9e346c630fe3376756330"} Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.908471 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" event={"ID":"60a5c1c0-450b-4360-b6f5-7380a0a2db4f","Type":"ContainerStarted","Data":"6ceb8490a094eaca3c9cb91de3aefcd960a87a0fbddbef1e021987761d5c7bea"} Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.909469 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.909725 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.909737 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.913826 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kp5bk_d8539d49-e453-4b15-a4d6-0e0583b93390/marketplace-operator/3.log" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.913864 5016 generic.go:334] "Generic (PLEG): container finished" podID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerID="0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38" exitCode=0 Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.913913 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" event={"ID":"d8539d49-e453-4b15-a4d6-0e0583b93390","Type":"ContainerDied","Data":"0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38"} Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.913952 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" event={"ID":"d8539d49-e453-4b15-a4d6-0e0583b93390","Type":"ContainerDied","Data":"c14b78520c7ea8e84e262157bd24dd17e9a6458f6d2f06a0084cb7eef9157778"} Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.914002 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kp5bk" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.919054 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.922302 5016 generic.go:334] "Generic (PLEG): container finished" podID="7242e8c3-6ed6-4613-8fc9-1339be494e56" containerID="ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492" exitCode=0 Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.922382 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tp5lv" event={"ID":"7242e8c3-6ed6-4613-8fc9-1339be494e56","Type":"ContainerDied","Data":"ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492"} Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.922559 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tp5lv" event={"ID":"7242e8c3-6ed6-4613-8fc9-1339be494e56","Type":"ContainerDied","Data":"8d609d97af6c76a71e621a36eb0291d626e442f43edaccf9915e8184aa7d664e"} Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.922421 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tp5lv" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.924384 5016 scope.go:117] "RemoveContainer" containerID="b5717445e8a0a0a5666d17fa7c7d1a4950c927eafdac25bf230cfaa389b392fc" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.932843 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-fqzqf" podStartSLOduration=2.932820419 podStartE2EDuration="2.932820419s" podCreationTimestamp="2025-12-11 10:42:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:42:09.927734784 +0000 UTC m=+446.746294393" watchObservedRunningTime="2025-12-11 10:42:09.932820419 +0000 UTC m=+446.751379998" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.948136 5016 generic.go:334] "Generic (PLEG): container finished" podID="f393088a-dacc-4673-8074-d6be25842a84" containerID="d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c" exitCode=0 Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.948189 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hqmxw" event={"ID":"f393088a-dacc-4673-8074-d6be25842a84","Type":"ContainerDied","Data":"d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c"} Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.948232 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hqmxw" event={"ID":"f393088a-dacc-4673-8074-d6be25842a84","Type":"ContainerDied","Data":"307bb5b083621eb15ca65bac079066523d83b4989fd45c9448ed08fb99078fbd"} Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.948342 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hqmxw" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.963469 5016 scope.go:117] "RemoveContainer" containerID="60b6b08c0d83f6b3f2b5afa6ab72aee259cd39dfcbf1e63c535917fd9155e570" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.968362 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7242e8c3-6ed6-4613-8fc9-1339be494e56-utilities\") pod \"7242e8c3-6ed6-4613-8fc9-1339be494e56\" (UID: \"7242e8c3-6ed6-4613-8fc9-1339be494e56\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.968424 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d8539d49-e453-4b15-a4d6-0e0583b93390-marketplace-operator-metrics\") pod \"d8539d49-e453-4b15-a4d6-0e0583b93390\" (UID: \"d8539d49-e453-4b15-a4d6-0e0583b93390\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.969350 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d8539d49-e453-4b15-a4d6-0e0583b93390-marketplace-trusted-ca\") pod \"d8539d49-e453-4b15-a4d6-0e0583b93390\" (UID: \"d8539d49-e453-4b15-a4d6-0e0583b93390\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.969476 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gc7ml\" (UniqueName: \"kubernetes.io/projected/f393088a-dacc-4673-8074-d6be25842a84-kube-api-access-gc7ml\") pod \"f393088a-dacc-4673-8074-d6be25842a84\" (UID: \"f393088a-dacc-4673-8074-d6be25842a84\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.969592 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f07c0be-3ff2-4b4a-86f1-67da5394f101-utilities\") pod \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\" (UID: \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.969696 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f393088a-dacc-4673-8074-d6be25842a84-catalog-content\") pod \"f393088a-dacc-4673-8074-d6be25842a84\" (UID: \"f393088a-dacc-4673-8074-d6be25842a84\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.969859 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4jhp\" (UniqueName: \"kubernetes.io/projected/2f07c0be-3ff2-4b4a-86f1-67da5394f101-kube-api-access-n4jhp\") pod \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\" (UID: \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.970051 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f07c0be-3ff2-4b4a-86f1-67da5394f101-catalog-content\") pod \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\" (UID: \"2f07c0be-3ff2-4b4a-86f1-67da5394f101\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.970210 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7242e8c3-6ed6-4613-8fc9-1339be494e56-catalog-content\") pod \"7242e8c3-6ed6-4613-8fc9-1339be494e56\" (UID: \"7242e8c3-6ed6-4613-8fc9-1339be494e56\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 
10:42:09.970355 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6kq9n\" (UniqueName: \"kubernetes.io/projected/d8539d49-e453-4b15-a4d6-0e0583b93390-kube-api-access-6kq9n\") pod \"d8539d49-e453-4b15-a4d6-0e0583b93390\" (UID: \"d8539d49-e453-4b15-a4d6-0e0583b93390\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.970523 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f393088a-dacc-4673-8074-d6be25842a84-utilities\") pod \"f393088a-dacc-4673-8074-d6be25842a84\" (UID: \"f393088a-dacc-4673-8074-d6be25842a84\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.970654 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tm8r2\" (UniqueName: \"kubernetes.io/projected/7242e8c3-6ed6-4613-8fc9-1339be494e56-kube-api-access-tm8r2\") pod \"7242e8c3-6ed6-4613-8fc9-1339be494e56\" (UID: \"7242e8c3-6ed6-4613-8fc9-1339be494e56\") " Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.971546 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6n2x\" (UniqueName: \"kubernetes.io/projected/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-kube-api-access-g6n2x\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.971657 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.980371 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8539d49-e453-4b15-a4d6-0e0583b93390-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "d8539d49-e453-4b15-a4d6-0e0583b93390" (UID: "d8539d49-e453-4b15-a4d6-0e0583b93390"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.980926 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7242e8c3-6ed6-4613-8fc9-1339be494e56-utilities" (OuterVolumeSpecName: "utilities") pod "7242e8c3-6ed6-4613-8fc9-1339be494e56" (UID: "7242e8c3-6ed6-4613-8fc9-1339be494e56"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.984235 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f07c0be-3ff2-4b4a-86f1-67da5394f101-utilities" (OuterVolumeSpecName: "utilities") pod "2f07c0be-3ff2-4b4a-86f1-67da5394f101" (UID: "2f07c0be-3ff2-4b4a-86f1-67da5394f101"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.985038 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8539d49-e453-4b15-a4d6-0e0583b93390-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "d8539d49-e453-4b15-a4d6-0e0583b93390" (UID: "d8539d49-e453-4b15-a4d6-0e0583b93390"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.986130 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f07c0be-3ff2-4b4a-86f1-67da5394f101-kube-api-access-n4jhp" (OuterVolumeSpecName: "kube-api-access-n4jhp") pod "2f07c0be-3ff2-4b4a-86f1-67da5394f101" (UID: "2f07c0be-3ff2-4b4a-86f1-67da5394f101"). InnerVolumeSpecName "kube-api-access-n4jhp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.986171 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f393088a-dacc-4673-8074-d6be25842a84-kube-api-access-gc7ml" (OuterVolumeSpecName: "kube-api-access-gc7ml") pod "f393088a-dacc-4673-8074-d6be25842a84" (UID: "f393088a-dacc-4673-8074-d6be25842a84"). InnerVolumeSpecName "kube-api-access-gc7ml". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.990236 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f393088a-dacc-4673-8074-d6be25842a84-utilities" (OuterVolumeSpecName: "utilities") pod "f393088a-dacc-4673-8074-d6be25842a84" (UID: "f393088a-dacc-4673-8074-d6be25842a84"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.994143 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7242e8c3-6ed6-4613-8fc9-1339be494e56-kube-api-access-tm8r2" (OuterVolumeSpecName: "kube-api-access-tm8r2") pod "7242e8c3-6ed6-4613-8fc9-1339be494e56" (UID: "7242e8c3-6ed6-4613-8fc9-1339be494e56"). InnerVolumeSpecName "kube-api-access-tm8r2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.995228 5016 scope.go:117] "RemoveContainer" containerID="45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.996372 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8539d49-e453-4b15-a4d6-0e0583b93390-kube-api-access-6kq9n" (OuterVolumeSpecName: "kube-api-access-6kq9n") pod "d8539d49-e453-4b15-a4d6-0e0583b93390" (UID: "d8539d49-e453-4b15-a4d6-0e0583b93390"). InnerVolumeSpecName "kube-api-access-6kq9n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:42:09 crc kubenswrapper[5016]: E1211 10:42:09.997197 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e\": container with ID starting with 45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e not found: ID does not exist" containerID="45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.997258 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e"} err="failed to get container status \"45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e\": rpc error: code = NotFound desc = could not find container \"45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e\": container with ID starting with 45907d0809f0a6b7a9500a212aa2c113b7b9cfec75a0f3d90fd25ee7a05dd89e not found: ID does not exist" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.997283 5016 scope.go:117] "RemoveContainer" containerID="b5717445e8a0a0a5666d17fa7c7d1a4950c927eafdac25bf230cfaa389b392fc" Dec 11 10:42:09 crc kubenswrapper[5016]: E1211 10:42:09.999048 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5717445e8a0a0a5666d17fa7c7d1a4950c927eafdac25bf230cfaa389b392fc\": container with ID starting with b5717445e8a0a0a5666d17fa7c7d1a4950c927eafdac25bf230cfaa389b392fc not found: ID does not exist" containerID="b5717445e8a0a0a5666d17fa7c7d1a4950c927eafdac25bf230cfaa389b392fc" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.999111 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5717445e8a0a0a5666d17fa7c7d1a4950c927eafdac25bf230cfaa389b392fc"} err="failed to get container status \"b5717445e8a0a0a5666d17fa7c7d1a4950c927eafdac25bf230cfaa389b392fc\": rpc error: code = NotFound desc = could not find container \"b5717445e8a0a0a5666d17fa7c7d1a4950c927eafdac25bf230cfaa389b392fc\": container with ID starting with b5717445e8a0a0a5666d17fa7c7d1a4950c927eafdac25bf230cfaa389b392fc not found: ID does not exist" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.999144 5016 scope.go:117] "RemoveContainer" containerID="60b6b08c0d83f6b3f2b5afa6ab72aee259cd39dfcbf1e63c535917fd9155e570" Dec 11 10:42:09 crc kubenswrapper[5016]: E1211 10:42:09.999485 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60b6b08c0d83f6b3f2b5afa6ab72aee259cd39dfcbf1e63c535917fd9155e570\": container with ID starting with 60b6b08c0d83f6b3f2b5afa6ab72aee259cd39dfcbf1e63c535917fd9155e570 not found: ID does not exist" containerID="60b6b08c0d83f6b3f2b5afa6ab72aee259cd39dfcbf1e63c535917fd9155e570" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.999509 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60b6b08c0d83f6b3f2b5afa6ab72aee259cd39dfcbf1e63c535917fd9155e570"} err="failed to get container status \"60b6b08c0d83f6b3f2b5afa6ab72aee259cd39dfcbf1e63c535917fd9155e570\": rpc error: code = NotFound desc = could not find container \"60b6b08c0d83f6b3f2b5afa6ab72aee259cd39dfcbf1e63c535917fd9155e570\": container with ID starting with 
60b6b08c0d83f6b3f2b5afa6ab72aee259cd39dfcbf1e63c535917fd9155e570 not found: ID does not exist" Dec 11 10:42:09 crc kubenswrapper[5016]: I1211 10:42:09.999524 5016 scope.go:117] "RemoveContainer" containerID="0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.023070 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" (UID: "9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.037086 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f393088a-dacc-4673-8074-d6be25842a84-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f393088a-dacc-4673-8074-d6be25842a84" (UID: "f393088a-dacc-4673-8074-d6be25842a84"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.041400 5016 scope.go:117] "RemoveContainer" containerID="f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.054670 5016 scope.go:117] "RemoveContainer" containerID="0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38" Dec 11 10:42:10 crc kubenswrapper[5016]: E1211 10:42:10.055326 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38\": container with ID starting with 0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38 not found: ID does not exist" containerID="0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.055364 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38"} err="failed to get container status \"0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38\": rpc error: code = NotFound desc = could not find container \"0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38\": container with ID starting with 0e90bb6cffbace6d578301fcb7b3bd5b87d6d4894782aa70f36f75e32b9cfe38 not found: ID does not exist" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.055390 5016 scope.go:117] "RemoveContainer" containerID="f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65" Dec 11 10:42:10 crc kubenswrapper[5016]: E1211 10:42:10.056200 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65\": container with ID starting with f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65 not found: ID does not exist" containerID="f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.056254 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65"} err="failed to get container status 
\"f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65\": rpc error: code = NotFound desc = could not find container \"f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65\": container with ID starting with f71527ff64ef026c330810476f2fe10b8c8b6881121816896b09515e8972bb65 not found: ID does not exist" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.056290 5016 scope.go:117] "RemoveContainer" containerID="ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.065302 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7242e8c3-6ed6-4613-8fc9-1339be494e56-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7242e8c3-6ed6-4613-8fc9-1339be494e56" (UID: "7242e8c3-6ed6-4613-8fc9-1339be494e56"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.068709 5016 scope.go:117] "RemoveContainer" containerID="49f36ac691fccb6c64dcb740e006d223ac23b60a5d4657c2f6fec3f0ab151b49" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.074023 5016 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d8539d49-e453-4b15-a4d6-0e0583b93390-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.074073 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7242e8c3-6ed6-4613-8fc9-1339be494e56-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.074095 5016 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d8539d49-e453-4b15-a4d6-0e0583b93390-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.074111 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gc7ml\" (UniqueName: \"kubernetes.io/projected/f393088a-dacc-4673-8074-d6be25842a84-kube-api-access-gc7ml\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.074124 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.074135 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f07c0be-3ff2-4b4a-86f1-67da5394f101-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.074145 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f393088a-dacc-4673-8074-d6be25842a84-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.074156 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4jhp\" (UniqueName: \"kubernetes.io/projected/2f07c0be-3ff2-4b4a-86f1-67da5394f101-kube-api-access-n4jhp\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.074167 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/7242e8c3-6ed6-4613-8fc9-1339be494e56-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.074246 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6kq9n\" (UniqueName: \"kubernetes.io/projected/d8539d49-e453-4b15-a4d6-0e0583b93390-kube-api-access-6kq9n\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.074267 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f393088a-dacc-4673-8074-d6be25842a84-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.074276 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tm8r2\" (UniqueName: \"kubernetes.io/projected/7242e8c3-6ed6-4613-8fc9-1339be494e56-kube-api-access-tm8r2\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.104669 5016 scope.go:117] "RemoveContainer" containerID="7748e0cb21700e427bc30f167c4e4654d84bf11b31376fa6c0fcb57f3df3a33a" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.119364 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f07c0be-3ff2-4b4a-86f1-67da5394f101-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2f07c0be-3ff2-4b4a-86f1-67da5394f101" (UID: "2f07c0be-3ff2-4b4a-86f1-67da5394f101"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.121024 5016 scope.go:117] "RemoveContainer" containerID="ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492" Dec 11 10:42:10 crc kubenswrapper[5016]: E1211 10:42:10.121597 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492\": container with ID starting with ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492 not found: ID does not exist" containerID="ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.121645 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492"} err="failed to get container status \"ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492\": rpc error: code = NotFound desc = could not find container \"ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492\": container with ID starting with ed297b07249ee0110b16cdd05e2d99190d612a95d1bfeb1250357015d6cdd492 not found: ID does not exist" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.121673 5016 scope.go:117] "RemoveContainer" containerID="49f36ac691fccb6c64dcb740e006d223ac23b60a5d4657c2f6fec3f0ab151b49" Dec 11 10:42:10 crc kubenswrapper[5016]: E1211 10:42:10.122863 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49f36ac691fccb6c64dcb740e006d223ac23b60a5d4657c2f6fec3f0ab151b49\": container with ID starting with 49f36ac691fccb6c64dcb740e006d223ac23b60a5d4657c2f6fec3f0ab151b49 not found: ID does not exist" containerID="49f36ac691fccb6c64dcb740e006d223ac23b60a5d4657c2f6fec3f0ab151b49" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.122930 5016 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49f36ac691fccb6c64dcb740e006d223ac23b60a5d4657c2f6fec3f0ab151b49"} err="failed to get container status \"49f36ac691fccb6c64dcb740e006d223ac23b60a5d4657c2f6fec3f0ab151b49\": rpc error: code = NotFound desc = could not find container \"49f36ac691fccb6c64dcb740e006d223ac23b60a5d4657c2f6fec3f0ab151b49\": container with ID starting with 49f36ac691fccb6c64dcb740e006d223ac23b60a5d4657c2f6fec3f0ab151b49 not found: ID does not exist" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.123173 5016 scope.go:117] "RemoveContainer" containerID="7748e0cb21700e427bc30f167c4e4654d84bf11b31376fa6c0fcb57f3df3a33a" Dec 11 10:42:10 crc kubenswrapper[5016]: E1211 10:42:10.124058 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7748e0cb21700e427bc30f167c4e4654d84bf11b31376fa6c0fcb57f3df3a33a\": container with ID starting with 7748e0cb21700e427bc30f167c4e4654d84bf11b31376fa6c0fcb57f3df3a33a not found: ID does not exist" containerID="7748e0cb21700e427bc30f167c4e4654d84bf11b31376fa6c0fcb57f3df3a33a" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.124091 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7748e0cb21700e427bc30f167c4e4654d84bf11b31376fa6c0fcb57f3df3a33a"} err="failed to get container status \"7748e0cb21700e427bc30f167c4e4654d84bf11b31376fa6c0fcb57f3df3a33a\": rpc error: code = NotFound desc = could not find container \"7748e0cb21700e427bc30f167c4e4654d84bf11b31376fa6c0fcb57f3df3a33a\": container with ID starting with 7748e0cb21700e427bc30f167c4e4654d84bf11b31376fa6c0fcb57f3df3a33a not found: ID does not exist" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.124129 5016 scope.go:117] "RemoveContainer" containerID="d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.143991 5016 scope.go:117] "RemoveContainer" containerID="d03c7a1c53056a349d7658275d508f3cfd4d2aa81ffeb9d7e8e4319df8a11ac3" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.162639 5016 scope.go:117] "RemoveContainer" containerID="29167e1f7fdca5acf9740a43d23a4cb85769ad7d1f5e6fcbe0f80600e46fd422" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.175474 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f07c0be-3ff2-4b4a-86f1-67da5394f101-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.182161 5016 scope.go:117] "RemoveContainer" containerID="d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c" Dec 11 10:42:10 crc kubenswrapper[5016]: E1211 10:42:10.182675 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c\": container with ID starting with d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c not found: ID does not exist" containerID="d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.182715 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c"} err="failed to get container status \"d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c\": rpc error: code = NotFound desc = 
could not find container \"d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c\": container with ID starting with d7674fae9fd6cdde2e4103f1189df1b022f4159acff0afdc6a83475dfad6dd2c not found: ID does not exist" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.182741 5016 scope.go:117] "RemoveContainer" containerID="d03c7a1c53056a349d7658275d508f3cfd4d2aa81ffeb9d7e8e4319df8a11ac3" Dec 11 10:42:10 crc kubenswrapper[5016]: E1211 10:42:10.183405 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d03c7a1c53056a349d7658275d508f3cfd4d2aa81ffeb9d7e8e4319df8a11ac3\": container with ID starting with d03c7a1c53056a349d7658275d508f3cfd4d2aa81ffeb9d7e8e4319df8a11ac3 not found: ID does not exist" containerID="d03c7a1c53056a349d7658275d508f3cfd4d2aa81ffeb9d7e8e4319df8a11ac3" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.183470 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d03c7a1c53056a349d7658275d508f3cfd4d2aa81ffeb9d7e8e4319df8a11ac3"} err="failed to get container status \"d03c7a1c53056a349d7658275d508f3cfd4d2aa81ffeb9d7e8e4319df8a11ac3\": rpc error: code = NotFound desc = could not find container \"d03c7a1c53056a349d7658275d508f3cfd4d2aa81ffeb9d7e8e4319df8a11ac3\": container with ID starting with d03c7a1c53056a349d7658275d508f3cfd4d2aa81ffeb9d7e8e4319df8a11ac3 not found: ID does not exist" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.183500 5016 scope.go:117] "RemoveContainer" containerID="29167e1f7fdca5acf9740a43d23a4cb85769ad7d1f5e6fcbe0f80600e46fd422" Dec 11 10:42:10 crc kubenswrapper[5016]: E1211 10:42:10.184362 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29167e1f7fdca5acf9740a43d23a4cb85769ad7d1f5e6fcbe0f80600e46fd422\": container with ID starting with 29167e1f7fdca5acf9740a43d23a4cb85769ad7d1f5e6fcbe0f80600e46fd422 not found: ID does not exist" containerID="29167e1f7fdca5acf9740a43d23a4cb85769ad7d1f5e6fcbe0f80600e46fd422" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.184722 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29167e1f7fdca5acf9740a43d23a4cb85769ad7d1f5e6fcbe0f80600e46fd422"} err="failed to get container status \"29167e1f7fdca5acf9740a43d23a4cb85769ad7d1f5e6fcbe0f80600e46fd422\": rpc error: code = NotFound desc = could not find container \"29167e1f7fdca5acf9740a43d23a4cb85769ad7d1f5e6fcbe0f80600e46fd422\": container with ID starting with 29167e1f7fdca5acf9740a43d23a4cb85769ad7d1f5e6fcbe0f80600e46fd422 not found: ID does not exist" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.224961 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c6sdb"] Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.228519 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-c6sdb"] Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.258119 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kp5bk"] Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.261222 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kp5bk"] Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.274094 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/certified-operators-tp5lv"] Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.278275 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tp5lv"] Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.285140 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hqmxw"] Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.289771 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hqmxw"] Dec 11 10:42:10 crc kubenswrapper[5016]: E1211 10:42:10.315889 5016 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8539d49_e453_4b15_a4d6_0e0583b93390.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7242e8c3_6ed6_4613_8fc9_1339be494e56.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8539d49_e453_4b15_a4d6_0e0583b93390.slice/crio-c14b78520c7ea8e84e262157bd24dd17e9a6458f6d2f06a0084cb7eef9157778\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7242e8c3_6ed6_4613_8fc9_1339be494e56.slice/crio-8d609d97af6c76a71e621a36eb0291d626e442f43edaccf9915e8184aa7d664e\": RecentStats: unable to find data in memory cache]" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.958716 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rx8bv" event={"ID":"2f07c0be-3ff2-4b4a-86f1-67da5394f101","Type":"ContainerDied","Data":"c51e5ecbbd36a9a56f608cc8c02fa75beb78930580e8d790fb98a6702442a885"} Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.958773 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rx8bv" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.959107 5016 scope.go:117] "RemoveContainer" containerID="ce79c4c37bd26cf001092c437813df70aea50b475de9e346c630fe3376756330" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.974714 5016 scope.go:117] "RemoveContainer" containerID="1c79ed5e5e8ae4c87ce81651d95475d7326df4f01182b3b46ae35a94f899301c" Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.990467 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rx8bv"] Dec 11 10:42:10 crc kubenswrapper[5016]: I1211 10:42:10.993458 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rx8bv"] Dec 11 10:42:11 crc kubenswrapper[5016]: I1211 10:42:11.014925 5016 scope.go:117] "RemoveContainer" containerID="5784e5504e28ad0d0917a0bc4f0bd4afcfa36d4c9f3fcc2bdf500f2454f8c84c" Dec 11 10:42:11 crc kubenswrapper[5016]: I1211 10:42:11.482467 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" path="/var/lib/kubelet/pods/2f07c0be-3ff2-4b4a-86f1-67da5394f101/volumes" Dec 11 10:42:11 crc kubenswrapper[5016]: I1211 10:42:11.483099 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" path="/var/lib/kubelet/pods/7242e8c3-6ed6-4613-8fc9-1339be494e56/volumes" Dec 11 10:42:11 crc kubenswrapper[5016]: I1211 10:42:11.483653 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" path="/var/lib/kubelet/pods/9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7/volumes" Dec 11 10:42:11 crc kubenswrapper[5016]: I1211 10:42:11.485003 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" path="/var/lib/kubelet/pods/d8539d49-e453-4b15-a4d6-0e0583b93390/volumes" Dec 11 10:42:11 crc kubenswrapper[5016]: I1211 10:42:11.485535 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f393088a-dacc-4673-8074-d6be25842a84" path="/var/lib/kubelet/pods/f393088a-dacc-4673-8074-d6be25842a84/volumes" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170165 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pkv6p"] Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170632 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f393088a-dacc-4673-8074-d6be25842a84" containerName="registry-server" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170651 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f393088a-dacc-4673-8074-d6be25842a84" containerName="registry-server" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170666 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170677 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170684 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f393088a-dacc-4673-8074-d6be25842a84" containerName="extract-content" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170692 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f393088a-dacc-4673-8074-d6be25842a84" 
containerName="extract-content" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170702 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" containerName="extract-content" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170708 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" containerName="extract-content" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170719 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" containerName="extract-utilities" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170727 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" containerName="extract-utilities" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170737 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" containerName="extract-utilities" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170744 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" containerName="extract-utilities" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170753 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" containerName="extract-content" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170760 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" containerName="extract-content" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170770 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" containerName="registry-server" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170777 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" containerName="registry-server" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170786 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f393088a-dacc-4673-8074-d6be25842a84" containerName="extract-utilities" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170796 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f393088a-dacc-4673-8074-d6be25842a84" containerName="extract-utilities" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170806 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170813 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170828 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170835 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170843 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" containerName="extract-content" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170850 5016 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" containerName="extract-content" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170862 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170869 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170877 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170884 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170892 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" containerName="extract-utilities" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170899 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" containerName="extract-utilities" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170912 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" containerName="registry-server" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170922 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" containerName="registry-server" Dec 11 10:42:12 crc kubenswrapper[5016]: E1211 10:42:12.170930 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" containerName="registry-server" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.170960 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" containerName="registry-server" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.171085 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.171093 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="7242e8c3-6ed6-4613-8fc9-1339be494e56" containerName="registry-server" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.171102 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.171112 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f393088a-dacc-4673-8074-d6be25842a84" containerName="registry-server" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.171124 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.171132 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a3909e0-82ef-4ea0-9c6a-f40ebf9375e7" containerName="registry-server" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.171145 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f07c0be-3ff2-4b4a-86f1-67da5394f101" containerName="registry-server" Dec 11 10:42:12 crc 
kubenswrapper[5016]: I1211 10:42:12.171359 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.171569 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8539d49-e453-4b15-a4d6-0e0583b93390" containerName="marketplace-operator" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.172191 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.176036 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.177317 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pkv6p"] Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.303282 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a515f52f-4817-4d70-8545-ea013bdd98f4-catalog-content\") pod \"community-operators-pkv6p\" (UID: \"a515f52f-4817-4d70-8545-ea013bdd98f4\") " pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.303778 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-765zs\" (UniqueName: \"kubernetes.io/projected/a515f52f-4817-4d70-8545-ea013bdd98f4-kube-api-access-765zs\") pod \"community-operators-pkv6p\" (UID: \"a515f52f-4817-4d70-8545-ea013bdd98f4\") " pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.303847 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a515f52f-4817-4d70-8545-ea013bdd98f4-utilities\") pod \"community-operators-pkv6p\" (UID: \"a515f52f-4817-4d70-8545-ea013bdd98f4\") " pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.412498 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a515f52f-4817-4d70-8545-ea013bdd98f4-catalog-content\") pod \"community-operators-pkv6p\" (UID: \"a515f52f-4817-4d70-8545-ea013bdd98f4\") " pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.412565 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-765zs\" (UniqueName: \"kubernetes.io/projected/a515f52f-4817-4d70-8545-ea013bdd98f4-kube-api-access-765zs\") pod \"community-operators-pkv6p\" (UID: \"a515f52f-4817-4d70-8545-ea013bdd98f4\") " pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.412636 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a515f52f-4817-4d70-8545-ea013bdd98f4-utilities\") pod \"community-operators-pkv6p\" (UID: \"a515f52f-4817-4d70-8545-ea013bdd98f4\") " pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.413170 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/a515f52f-4817-4d70-8545-ea013bdd98f4-utilities\") pod \"community-operators-pkv6p\" (UID: \"a515f52f-4817-4d70-8545-ea013bdd98f4\") " pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.413428 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a515f52f-4817-4d70-8545-ea013bdd98f4-catalog-content\") pod \"community-operators-pkv6p\" (UID: \"a515f52f-4817-4d70-8545-ea013bdd98f4\") " pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.470528 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-765zs\" (UniqueName: \"kubernetes.io/projected/a515f52f-4817-4d70-8545-ea013bdd98f4-kube-api-access-765zs\") pod \"community-operators-pkv6p\" (UID: \"a515f52f-4817-4d70-8545-ea013bdd98f4\") " pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.490480 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.693890 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pkv6p"] Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.932828 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.932955 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.933029 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.933858 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"793991a7e6d358bd8fbd2f0bae8254371015f24f8ff9bca5c69c392121b0afd1"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.933930 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://793991a7e6d358bd8fbd2f0bae8254371015f24f8ff9bca5c69c392121b0afd1" gracePeriod=600 Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.974467 5016 generic.go:334] "Generic (PLEG): container finished" podID="a515f52f-4817-4d70-8545-ea013bdd98f4" containerID="97d79312f5de21262f2268605e99251bdd809e8d0e93e4138e17b88436cab90d" exitCode=0 Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.974529 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-pkv6p" event={"ID":"a515f52f-4817-4d70-8545-ea013bdd98f4","Type":"ContainerDied","Data":"97d79312f5de21262f2268605e99251bdd809e8d0e93e4138e17b88436cab90d"} Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.974561 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pkv6p" event={"ID":"a515f52f-4817-4d70-8545-ea013bdd98f4","Type":"ContainerStarted","Data":"b041601985fb828af63a87fe6342c7998e89c8d03ee502a8237bee20be20444b"} Dec 11 10:42:12 crc kubenswrapper[5016]: I1211 10:42:12.977575 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.161858 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vrh9v"] Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.163649 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.166120 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.182601 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vrh9v"] Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.225010 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d896058c-2d6d-47e8-b1fc-d0b68de8098e-catalog-content\") pod \"certified-operators-vrh9v\" (UID: \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\") " pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.225052 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phgds\" (UniqueName: \"kubernetes.io/projected/d896058c-2d6d-47e8-b1fc-d0b68de8098e-kube-api-access-phgds\") pod \"certified-operators-vrh9v\" (UID: \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\") " pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.225113 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d896058c-2d6d-47e8-b1fc-d0b68de8098e-utilities\") pod \"certified-operators-vrh9v\" (UID: \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\") " pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.327692 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d896058c-2d6d-47e8-b1fc-d0b68de8098e-catalog-content\") pod \"certified-operators-vrh9v\" (UID: \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\") " pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.328230 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phgds\" (UniqueName: \"kubernetes.io/projected/d896058c-2d6d-47e8-b1fc-d0b68de8098e-kube-api-access-phgds\") pod \"certified-operators-vrh9v\" (UID: \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\") " pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.328382 5016 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d896058c-2d6d-47e8-b1fc-d0b68de8098e-utilities\") pod \"certified-operators-vrh9v\" (UID: \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\") " pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.328241 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d896058c-2d6d-47e8-b1fc-d0b68de8098e-catalog-content\") pod \"certified-operators-vrh9v\" (UID: \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\") " pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.328873 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d896058c-2d6d-47e8-b1fc-d0b68de8098e-utilities\") pod \"certified-operators-vrh9v\" (UID: \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\") " pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.349795 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phgds\" (UniqueName: \"kubernetes.io/projected/d896058c-2d6d-47e8-b1fc-d0b68de8098e-kube-api-access-phgds\") pod \"certified-operators-vrh9v\" (UID: \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\") " pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.486675 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.737833 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vrh9v"] Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.984053 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="793991a7e6d358bd8fbd2f0bae8254371015f24f8ff9bca5c69c392121b0afd1" exitCode=0 Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.984130 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"793991a7e6d358bd8fbd2f0bae8254371015f24f8ff9bca5c69c392121b0afd1"} Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.984202 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"61d3252a2d684020f84ac3017dc378ce04486bdbb0fae848d8fd9fafa07cbdba"} Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.984243 5016 scope.go:117] "RemoveContainer" containerID="b7028fe427b7682d3e5b7f2a5e7fedee9c12ebb5f609d4c361ba3d5fed28bee0" Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.986069 5016 generic.go:334] "Generic (PLEG): container finished" podID="d896058c-2d6d-47e8-b1fc-d0b68de8098e" containerID="345cb63392099b0df98d1469e7ca0cab0bbb7640d884ae7ddfe84accf3cab7e6" exitCode=0 Dec 11 10:42:13 crc kubenswrapper[5016]: I1211 10:42:13.986100 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrh9v" event={"ID":"d896058c-2d6d-47e8-b1fc-d0b68de8098e","Type":"ContainerDied","Data":"345cb63392099b0df98d1469e7ca0cab0bbb7640d884ae7ddfe84accf3cab7e6"} Dec 11 10:42:13 crc 
kubenswrapper[5016]: I1211 10:42:13.986119 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrh9v" event={"ID":"d896058c-2d6d-47e8-b1fc-d0b68de8098e","Type":"ContainerStarted","Data":"520eba003862dce66fda661a88742f1fe89fc127826dcf0ab984f6d906e19ece"} Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.565822 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-khfpb"] Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.567597 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.569608 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-khfpb"] Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.570080 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.648493 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22d65e49-69a1-4e26-bc1c-52bab4fc01ff-catalog-content\") pod \"redhat-marketplace-khfpb\" (UID: \"22d65e49-69a1-4e26-bc1c-52bab4fc01ff\") " pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.648539 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22d65e49-69a1-4e26-bc1c-52bab4fc01ff-utilities\") pod \"redhat-marketplace-khfpb\" (UID: \"22d65e49-69a1-4e26-bc1c-52bab4fc01ff\") " pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.648587 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb8s9\" (UniqueName: \"kubernetes.io/projected/22d65e49-69a1-4e26-bc1c-52bab4fc01ff-kube-api-access-zb8s9\") pod \"redhat-marketplace-khfpb\" (UID: \"22d65e49-69a1-4e26-bc1c-52bab4fc01ff\") " pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.749905 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22d65e49-69a1-4e26-bc1c-52bab4fc01ff-utilities\") pod \"redhat-marketplace-khfpb\" (UID: \"22d65e49-69a1-4e26-bc1c-52bab4fc01ff\") " pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.750019 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb8s9\" (UniqueName: \"kubernetes.io/projected/22d65e49-69a1-4e26-bc1c-52bab4fc01ff-kube-api-access-zb8s9\") pod \"redhat-marketplace-khfpb\" (UID: \"22d65e49-69a1-4e26-bc1c-52bab4fc01ff\") " pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.750066 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22d65e49-69a1-4e26-bc1c-52bab4fc01ff-catalog-content\") pod \"redhat-marketplace-khfpb\" (UID: \"22d65e49-69a1-4e26-bc1c-52bab4fc01ff\") " pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.750464 5016 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22d65e49-69a1-4e26-bc1c-52bab4fc01ff-utilities\") pod \"redhat-marketplace-khfpb\" (UID: \"22d65e49-69a1-4e26-bc1c-52bab4fc01ff\") " pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.750489 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22d65e49-69a1-4e26-bc1c-52bab4fc01ff-catalog-content\") pod \"redhat-marketplace-khfpb\" (UID: \"22d65e49-69a1-4e26-bc1c-52bab4fc01ff\") " pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.777620 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb8s9\" (UniqueName: \"kubernetes.io/projected/22d65e49-69a1-4e26-bc1c-52bab4fc01ff-kube-api-access-zb8s9\") pod \"redhat-marketplace-khfpb\" (UID: \"22d65e49-69a1-4e26-bc1c-52bab4fc01ff\") " pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:14 crc kubenswrapper[5016]: I1211 10:42:14.891186 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.000141 5016 generic.go:334] "Generic (PLEG): container finished" podID="a515f52f-4817-4d70-8545-ea013bdd98f4" containerID="535eb48fcea3184f731f0318a72db43b62c821f49ecb8a4129ade5aad0ed9fb6" exitCode=0 Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.000201 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pkv6p" event={"ID":"a515f52f-4817-4d70-8545-ea013bdd98f4","Type":"ContainerDied","Data":"535eb48fcea3184f731f0318a72db43b62c821f49ecb8a4129ade5aad0ed9fb6"} Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.321100 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-khfpb"] Dec 11 10:42:15 crc kubenswrapper[5016]: W1211 10:42:15.326196 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod22d65e49_69a1_4e26_bc1c_52bab4fc01ff.slice/crio-dd15fdbc7ae048279acb9c702fe117cd879aa60f47c27ae2b21f93f8d4d80580 WatchSource:0}: Error finding container dd15fdbc7ae048279acb9c702fe117cd879aa60f47c27ae2b21f93f8d4d80580: Status 404 returned error can't find the container with id dd15fdbc7ae048279acb9c702fe117cd879aa60f47c27ae2b21f93f8d4d80580 Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.557278 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6tbch"] Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.558957 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.566543 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.571877 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6tbch"] Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.668424 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hwtm\" (UniqueName: \"kubernetes.io/projected/801d4e82-9cd5-4795-9363-b4eca6f2189e-kube-api-access-8hwtm\") pod \"redhat-operators-6tbch\" (UID: \"801d4e82-9cd5-4795-9363-b4eca6f2189e\") " pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.668493 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/801d4e82-9cd5-4795-9363-b4eca6f2189e-catalog-content\") pod \"redhat-operators-6tbch\" (UID: \"801d4e82-9cd5-4795-9363-b4eca6f2189e\") " pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.668558 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/801d4e82-9cd5-4795-9363-b4eca6f2189e-utilities\") pod \"redhat-operators-6tbch\" (UID: \"801d4e82-9cd5-4795-9363-b4eca6f2189e\") " pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.770339 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hwtm\" (UniqueName: \"kubernetes.io/projected/801d4e82-9cd5-4795-9363-b4eca6f2189e-kube-api-access-8hwtm\") pod \"redhat-operators-6tbch\" (UID: \"801d4e82-9cd5-4795-9363-b4eca6f2189e\") " pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.770415 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/801d4e82-9cd5-4795-9363-b4eca6f2189e-catalog-content\") pod \"redhat-operators-6tbch\" (UID: \"801d4e82-9cd5-4795-9363-b4eca6f2189e\") " pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.770852 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/801d4e82-9cd5-4795-9363-b4eca6f2189e-catalog-content\") pod \"redhat-operators-6tbch\" (UID: \"801d4e82-9cd5-4795-9363-b4eca6f2189e\") " pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.771016 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/801d4e82-9cd5-4795-9363-b4eca6f2189e-utilities\") pod \"redhat-operators-6tbch\" (UID: \"801d4e82-9cd5-4795-9363-b4eca6f2189e\") " pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.771174 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/801d4e82-9cd5-4795-9363-b4eca6f2189e-utilities\") pod \"redhat-operators-6tbch\" (UID: \"801d4e82-9cd5-4795-9363-b4eca6f2189e\") " 
pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.795815 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hwtm\" (UniqueName: \"kubernetes.io/projected/801d4e82-9cd5-4795-9363-b4eca6f2189e-kube-api-access-8hwtm\") pod \"redhat-operators-6tbch\" (UID: \"801d4e82-9cd5-4795-9363-b4eca6f2189e\") " pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:15 crc kubenswrapper[5016]: I1211 10:42:15.956036 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:16 crc kubenswrapper[5016]: I1211 10:42:16.012788 5016 generic.go:334] "Generic (PLEG): container finished" podID="22d65e49-69a1-4e26-bc1c-52bab4fc01ff" containerID="1c446357a5837c778542b1cd852bfb9eb0fdf66aa9efa489d1f284677b032cf8" exitCode=0 Dec 11 10:42:16 crc kubenswrapper[5016]: I1211 10:42:16.012920 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-khfpb" event={"ID":"22d65e49-69a1-4e26-bc1c-52bab4fc01ff","Type":"ContainerDied","Data":"1c446357a5837c778542b1cd852bfb9eb0fdf66aa9efa489d1f284677b032cf8"} Dec 11 10:42:16 crc kubenswrapper[5016]: I1211 10:42:16.013007 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-khfpb" event={"ID":"22d65e49-69a1-4e26-bc1c-52bab4fc01ff","Type":"ContainerStarted","Data":"dd15fdbc7ae048279acb9c702fe117cd879aa60f47c27ae2b21f93f8d4d80580"} Dec 11 10:42:16 crc kubenswrapper[5016]: I1211 10:42:16.020816 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pkv6p" event={"ID":"a515f52f-4817-4d70-8545-ea013bdd98f4","Type":"ContainerStarted","Data":"95b3636fb8e119dcb74f74df09594ac034659284a7138f7acfcbe95d6f3e4bf5"} Dec 11 10:42:16 crc kubenswrapper[5016]: I1211 10:42:16.023299 5016 generic.go:334] "Generic (PLEG): container finished" podID="d896058c-2d6d-47e8-b1fc-d0b68de8098e" containerID="baec5952fe8989c48fd508c96b928666a88e74d6a7319f67242050bdd0a1e12c" exitCode=0 Dec 11 10:42:16 crc kubenswrapper[5016]: I1211 10:42:16.023351 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrh9v" event={"ID":"d896058c-2d6d-47e8-b1fc-d0b68de8098e","Type":"ContainerDied","Data":"baec5952fe8989c48fd508c96b928666a88e74d6a7319f67242050bdd0a1e12c"} Dec 11 10:42:16 crc kubenswrapper[5016]: I1211 10:42:16.064051 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pkv6p" podStartSLOduration=1.3762575510000001 podStartE2EDuration="4.06403139s" podCreationTimestamp="2025-12-11 10:42:12 +0000 UTC" firstStartedPulling="2025-12-11 10:42:12.977318569 +0000 UTC m=+449.795878148" lastFinishedPulling="2025-12-11 10:42:15.665092408 +0000 UTC m=+452.483651987" observedRunningTime="2025-12-11 10:42:16.062403941 +0000 UTC m=+452.880963540" watchObservedRunningTime="2025-12-11 10:42:16.06403139 +0000 UTC m=+452.882590979" Dec 11 10:42:16 crc kubenswrapper[5016]: I1211 10:42:16.189324 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6tbch"] Dec 11 10:42:17 crc kubenswrapper[5016]: I1211 10:42:17.032723 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrh9v" 
event={"ID":"d896058c-2d6d-47e8-b1fc-d0b68de8098e","Type":"ContainerStarted","Data":"b8e2eb53287f772715861e9a20cb26d32a42e9030a8076d320ccbd808a1d6f15"} Dec 11 10:42:17 crc kubenswrapper[5016]: I1211 10:42:17.034684 5016 generic.go:334] "Generic (PLEG): container finished" podID="801d4e82-9cd5-4795-9363-b4eca6f2189e" containerID="c8e2a5f227b6b430e2d51305a0fd5b05c74aaa403a2cc0a9581e69640f978a6b" exitCode=0 Dec 11 10:42:17 crc kubenswrapper[5016]: I1211 10:42:17.034738 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6tbch" event={"ID":"801d4e82-9cd5-4795-9363-b4eca6f2189e","Type":"ContainerDied","Data":"c8e2a5f227b6b430e2d51305a0fd5b05c74aaa403a2cc0a9581e69640f978a6b"} Dec 11 10:42:17 crc kubenswrapper[5016]: I1211 10:42:17.034759 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6tbch" event={"ID":"801d4e82-9cd5-4795-9363-b4eca6f2189e","Type":"ContainerStarted","Data":"35d21d9f7fa8a2a3baa0e1e1ce70f8a21b9468656256bdda17c1d7e2573ee290"} Dec 11 10:42:17 crc kubenswrapper[5016]: I1211 10:42:17.038104 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-khfpb" event={"ID":"22d65e49-69a1-4e26-bc1c-52bab4fc01ff","Type":"ContainerStarted","Data":"46ff748d60e4b0cf7338ead727ffc18b64047336b5e6850864a35a0247364b4f"} Dec 11 10:42:17 crc kubenswrapper[5016]: I1211 10:42:17.054397 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vrh9v" podStartSLOduration=1.355497256 podStartE2EDuration="4.054380726s" podCreationTimestamp="2025-12-11 10:42:13 +0000 UTC" firstStartedPulling="2025-12-11 10:42:13.988420221 +0000 UTC m=+450.806979800" lastFinishedPulling="2025-12-11 10:42:16.687303691 +0000 UTC m=+453.505863270" observedRunningTime="2025-12-11 10:42:17.050596283 +0000 UTC m=+453.869155862" watchObservedRunningTime="2025-12-11 10:42:17.054380726 +0000 UTC m=+453.872940305" Dec 11 10:42:18 crc kubenswrapper[5016]: I1211 10:42:18.047199 5016 generic.go:334] "Generic (PLEG): container finished" podID="22d65e49-69a1-4e26-bc1c-52bab4fc01ff" containerID="46ff748d60e4b0cf7338ead727ffc18b64047336b5e6850864a35a0247364b4f" exitCode=0 Dec 11 10:42:18 crc kubenswrapper[5016]: I1211 10:42:18.047410 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-khfpb" event={"ID":"22d65e49-69a1-4e26-bc1c-52bab4fc01ff","Type":"ContainerDied","Data":"46ff748d60e4b0cf7338ead727ffc18b64047336b5e6850864a35a0247364b4f"} Dec 11 10:42:19 crc kubenswrapper[5016]: I1211 10:42:19.054781 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6tbch" event={"ID":"801d4e82-9cd5-4795-9363-b4eca6f2189e","Type":"ContainerStarted","Data":"c16d7b0771317eef8bd3819d923e87e5d4a34be2f1e0ce3076982c12ae8304e7"} Dec 11 10:42:19 crc kubenswrapper[5016]: I1211 10:42:19.061426 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-khfpb" event={"ID":"22d65e49-69a1-4e26-bc1c-52bab4fc01ff","Type":"ContainerStarted","Data":"2f13a191a0f08f2f96c49a9ae9068d9e49e05c39496e2a592f218f996ac6cc6a"} Dec 11 10:42:19 crc kubenswrapper[5016]: I1211 10:42:19.105167 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-khfpb" podStartSLOduration=2.329565214 podStartE2EDuration="5.105148628s" podCreationTimestamp="2025-12-11 10:42:14 +0000 UTC" 
firstStartedPulling="2025-12-11 10:42:16.015598477 +0000 UTC m=+452.834158056" lastFinishedPulling="2025-12-11 10:42:18.791181891 +0000 UTC m=+455.609741470" observedRunningTime="2025-12-11 10:42:19.101888708 +0000 UTC m=+455.920448297" watchObservedRunningTime="2025-12-11 10:42:19.105148628 +0000 UTC m=+455.923708197" Dec 11 10:42:20 crc kubenswrapper[5016]: I1211 10:42:20.070976 5016 generic.go:334] "Generic (PLEG): container finished" podID="801d4e82-9cd5-4795-9363-b4eca6f2189e" containerID="c16d7b0771317eef8bd3819d923e87e5d4a34be2f1e0ce3076982c12ae8304e7" exitCode=0 Dec 11 10:42:20 crc kubenswrapper[5016]: I1211 10:42:20.071105 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6tbch" event={"ID":"801d4e82-9cd5-4795-9363-b4eca6f2189e","Type":"ContainerDied","Data":"c16d7b0771317eef8bd3819d923e87e5d4a34be2f1e0ce3076982c12ae8304e7"} Dec 11 10:42:20 crc kubenswrapper[5016]: I1211 10:42:20.648072 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96"] Dec 11 10:42:20 crc kubenswrapper[5016]: I1211 10:42:20.648623 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" podUID="dbd16799-2195-4cd1-9794-662c7ac6acec" containerName="route-controller-manager" containerID="cri-o://bb720ab41bc35bbbbceb6f018e54fea64f144a42c77c268d6720ac4331de511b" gracePeriod=30 Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.079149 5016 generic.go:334] "Generic (PLEG): container finished" podID="dbd16799-2195-4cd1-9794-662c7ac6acec" containerID="bb720ab41bc35bbbbceb6f018e54fea64f144a42c77c268d6720ac4331de511b" exitCode=0 Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.079235 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" event={"ID":"dbd16799-2195-4cd1-9794-662c7ac6acec","Type":"ContainerDied","Data":"bb720ab41bc35bbbbceb6f018e54fea64f144a42c77c268d6720ac4331de511b"} Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.081685 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6tbch" event={"ID":"801d4e82-9cd5-4795-9363-b4eca6f2189e","Type":"ContainerStarted","Data":"9575a5383b8eb52f0d8776c25bb41fd72113f1c7106e70bf34b5596dea743b08"} Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.106756 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6tbch" podStartSLOduration=2.211448232 podStartE2EDuration="6.106730899s" podCreationTimestamp="2025-12-11 10:42:15 +0000 UTC" firstStartedPulling="2025-12-11 10:42:17.036235493 +0000 UTC m=+453.854795072" lastFinishedPulling="2025-12-11 10:42:20.93151816 +0000 UTC m=+457.750077739" observedRunningTime="2025-12-11 10:42:21.102324312 +0000 UTC m=+457.920883921" watchObservedRunningTime="2025-12-11 10:42:21.106730899 +0000 UTC m=+457.925290478" Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.557914 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.668969 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbd16799-2195-4cd1-9794-662c7ac6acec-serving-cert\") pod \"dbd16799-2195-4cd1-9794-662c7ac6acec\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.669209 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbd16799-2195-4cd1-9794-662c7ac6acec-config\") pod \"dbd16799-2195-4cd1-9794-662c7ac6acec\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.669248 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dbd16799-2195-4cd1-9794-662c7ac6acec-client-ca\") pod \"dbd16799-2195-4cd1-9794-662c7ac6acec\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.669301 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfhwr\" (UniqueName: \"kubernetes.io/projected/dbd16799-2195-4cd1-9794-662c7ac6acec-kube-api-access-bfhwr\") pod \"dbd16799-2195-4cd1-9794-662c7ac6acec\" (UID: \"dbd16799-2195-4cd1-9794-662c7ac6acec\") " Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.670094 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbd16799-2195-4cd1-9794-662c7ac6acec-client-ca" (OuterVolumeSpecName: "client-ca") pod "dbd16799-2195-4cd1-9794-662c7ac6acec" (UID: "dbd16799-2195-4cd1-9794-662c7ac6acec"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.670115 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbd16799-2195-4cd1-9794-662c7ac6acec-config" (OuterVolumeSpecName: "config") pod "dbd16799-2195-4cd1-9794-662c7ac6acec" (UID: "dbd16799-2195-4cd1-9794-662c7ac6acec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.675449 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbd16799-2195-4cd1-9794-662c7ac6acec-kube-api-access-bfhwr" (OuterVolumeSpecName: "kube-api-access-bfhwr") pod "dbd16799-2195-4cd1-9794-662c7ac6acec" (UID: "dbd16799-2195-4cd1-9794-662c7ac6acec"). InnerVolumeSpecName "kube-api-access-bfhwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.676693 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbd16799-2195-4cd1-9794-662c7ac6acec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "dbd16799-2195-4cd1-9794-662c7ac6acec" (UID: "dbd16799-2195-4cd1-9794-662c7ac6acec"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.770800 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfhwr\" (UniqueName: \"kubernetes.io/projected/dbd16799-2195-4cd1-9794-662c7ac6acec-kube-api-access-bfhwr\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.770841 5016 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbd16799-2195-4cd1-9794-662c7ac6acec-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.770856 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbd16799-2195-4cd1-9794-662c7ac6acec-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:21 crc kubenswrapper[5016]: I1211 10:42:21.770869 5016 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dbd16799-2195-4cd1-9794-662c7ac6acec-client-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.089116 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" event={"ID":"dbd16799-2195-4cd1-9794-662c7ac6acec","Type":"ContainerDied","Data":"a29a45c12a785846fb3014e155d54c58f56d3acacaa0fc681dbf1e1380682ce5"} Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.089185 5016 scope.go:117] "RemoveContainer" containerID="bb720ab41bc35bbbbceb6f018e54fea64f144a42c77c268d6720ac4331de511b" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.089180 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.121040 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc"] Dec 11 10:42:22 crc kubenswrapper[5016]: E1211 10:42:22.125543 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbd16799-2195-4cd1-9794-662c7ac6acec" containerName="route-controller-manager" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.125580 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbd16799-2195-4cd1-9794-662c7ac6acec" containerName="route-controller-manager" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.126518 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbd16799-2195-4cd1-9794-662c7ac6acec" containerName="route-controller-manager" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.127096 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.128910 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96"] Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.129293 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.129617 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.129678 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.129806 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.130496 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.132641 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.133881 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69c79dd4cc-98t96"] Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.137503 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc"] Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.276495 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da03e425-8590-4819-8996-f17dd213a3ab-client-ca\") pod \"route-controller-manager-5f5c5bf969-2gxgc\" (UID: \"da03e425-8590-4819-8996-f17dd213a3ab\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.277024 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da03e425-8590-4819-8996-f17dd213a3ab-config\") pod \"route-controller-manager-5f5c5bf969-2gxgc\" (UID: \"da03e425-8590-4819-8996-f17dd213a3ab\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.277148 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jpqn\" (UniqueName: \"kubernetes.io/projected/da03e425-8590-4819-8996-f17dd213a3ab-kube-api-access-5jpqn\") pod \"route-controller-manager-5f5c5bf969-2gxgc\" (UID: \"da03e425-8590-4819-8996-f17dd213a3ab\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.277243 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da03e425-8590-4819-8996-f17dd213a3ab-serving-cert\") pod \"route-controller-manager-5f5c5bf969-2gxgc\" (UID: 
\"da03e425-8590-4819-8996-f17dd213a3ab\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.378848 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jpqn\" (UniqueName: \"kubernetes.io/projected/da03e425-8590-4819-8996-f17dd213a3ab-kube-api-access-5jpqn\") pod \"route-controller-manager-5f5c5bf969-2gxgc\" (UID: \"da03e425-8590-4819-8996-f17dd213a3ab\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.378918 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da03e425-8590-4819-8996-f17dd213a3ab-serving-cert\") pod \"route-controller-manager-5f5c5bf969-2gxgc\" (UID: \"da03e425-8590-4819-8996-f17dd213a3ab\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.378978 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da03e425-8590-4819-8996-f17dd213a3ab-client-ca\") pod \"route-controller-manager-5f5c5bf969-2gxgc\" (UID: \"da03e425-8590-4819-8996-f17dd213a3ab\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.379020 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da03e425-8590-4819-8996-f17dd213a3ab-config\") pod \"route-controller-manager-5f5c5bf969-2gxgc\" (UID: \"da03e425-8590-4819-8996-f17dd213a3ab\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.380370 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da03e425-8590-4819-8996-f17dd213a3ab-client-ca\") pod \"route-controller-manager-5f5c5bf969-2gxgc\" (UID: \"da03e425-8590-4819-8996-f17dd213a3ab\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.380441 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da03e425-8590-4819-8996-f17dd213a3ab-config\") pod \"route-controller-manager-5f5c5bf969-2gxgc\" (UID: \"da03e425-8590-4819-8996-f17dd213a3ab\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.383653 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da03e425-8590-4819-8996-f17dd213a3ab-serving-cert\") pod \"route-controller-manager-5f5c5bf969-2gxgc\" (UID: \"da03e425-8590-4819-8996-f17dd213a3ab\") " pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.401394 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jpqn\" (UniqueName: \"kubernetes.io/projected/da03e425-8590-4819-8996-f17dd213a3ab-kube-api-access-5jpqn\") pod \"route-controller-manager-5f5c5bf969-2gxgc\" (UID: \"da03e425-8590-4819-8996-f17dd213a3ab\") " 
pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.454284 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.493118 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.493523 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.534556 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:22 crc kubenswrapper[5016]: W1211 10:42:22.941495 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda03e425_8590_4819_8996_f17dd213a3ab.slice/crio-0ee14516dc6c441242ff63ece1ceca9be5d59eddb00dbf6d7a910defe1caf3ad WatchSource:0}: Error finding container 0ee14516dc6c441242ff63ece1ceca9be5d59eddb00dbf6d7a910defe1caf3ad: Status 404 returned error can't find the container with id 0ee14516dc6c441242ff63ece1ceca9be5d59eddb00dbf6d7a910defe1caf3ad Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.954976 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc"] Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.978705 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-jbjmk"] Dec 11 10:42:22 crc kubenswrapper[5016]: I1211 10:42:22.980080 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.008264 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-jbjmk"] Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.088498 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5bb4a62e-abe6-47a8-9823-3e1d552e451d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.088544 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gwng\" (UniqueName: \"kubernetes.io/projected/5bb4a62e-abe6-47a8-9823-3e1d552e451d-kube-api-access-2gwng\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.088606 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.088634 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5bb4a62e-abe6-47a8-9823-3e1d552e451d-bound-sa-token\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.088658 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5bb4a62e-abe6-47a8-9823-3e1d552e451d-registry-certificates\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.088679 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5bb4a62e-abe6-47a8-9823-3e1d552e451d-registry-tls\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.088801 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bb4a62e-abe6-47a8-9823-3e1d552e451d-trusted-ca\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.088863 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/5bb4a62e-abe6-47a8-9823-3e1d552e451d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.098502 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" event={"ID":"da03e425-8590-4819-8996-f17dd213a3ab","Type":"ContainerStarted","Data":"0ee14516dc6c441242ff63ece1ceca9be5d59eddb00dbf6d7a910defe1caf3ad"} Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.112062 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.136854 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pkv6p" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.190521 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gwng\" (UniqueName: \"kubernetes.io/projected/5bb4a62e-abe6-47a8-9823-3e1d552e451d-kube-api-access-2gwng\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.190593 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5bb4a62e-abe6-47a8-9823-3e1d552e451d-bound-sa-token\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.190619 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5bb4a62e-abe6-47a8-9823-3e1d552e451d-registry-certificates\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.190647 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5bb4a62e-abe6-47a8-9823-3e1d552e451d-registry-tls\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.190688 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bb4a62e-abe6-47a8-9823-3e1d552e451d-trusted-ca\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.190714 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/5bb4a62e-abe6-47a8-9823-3e1d552e451d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.190740 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5bb4a62e-abe6-47a8-9823-3e1d552e451d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.193514 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5bb4a62e-abe6-47a8-9823-3e1d552e451d-registry-certificates\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.193673 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5bb4a62e-abe6-47a8-9823-3e1d552e451d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.194037 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bb4a62e-abe6-47a8-9823-3e1d552e451d-trusted-ca\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.197707 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5bb4a62e-abe6-47a8-9823-3e1d552e451d-registry-tls\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.204562 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5bb4a62e-abe6-47a8-9823-3e1d552e451d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.215564 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gwng\" (UniqueName: \"kubernetes.io/projected/5bb4a62e-abe6-47a8-9823-3e1d552e451d-kube-api-access-2gwng\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.217418 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5bb4a62e-abe6-47a8-9823-3e1d552e451d-bound-sa-token\") pod \"image-registry-66df7c8f76-jbjmk\" (UID: \"5bb4a62e-abe6-47a8-9823-3e1d552e451d\") " pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc 
kubenswrapper[5016]: I1211 10:42:23.351714 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.509406 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbd16799-2195-4cd1-9794-662c7ac6acec" path="/var/lib/kubelet/pods/dbd16799-2195-4cd1-9794-662c7ac6acec/volumes" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.511486 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.511711 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.539266 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:23 crc kubenswrapper[5016]: W1211 10:42:23.637485 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5bb4a62e_abe6_47a8_9823_3e1d552e451d.slice/crio-050967273c4ec55f3267a36824bf7b2efadc85e576730d7164e1329f1ce279f9 WatchSource:0}: Error finding container 050967273c4ec55f3267a36824bf7b2efadc85e576730d7164e1329f1ce279f9: Status 404 returned error can't find the container with id 050967273c4ec55f3267a36824bf7b2efadc85e576730d7164e1329f1ce279f9 Dec 11 10:42:23 crc kubenswrapper[5016]: I1211 10:42:23.645286 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-jbjmk"] Dec 11 10:42:24 crc kubenswrapper[5016]: I1211 10:42:24.107162 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" event={"ID":"5bb4a62e-abe6-47a8-9823-3e1d552e451d","Type":"ContainerStarted","Data":"963443bf6225dd8d5c7cead0fcd423a2bc37a987196f08f94161cc7d8d044815"} Dec 11 10:42:24 crc kubenswrapper[5016]: I1211 10:42:24.107619 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:24 crc kubenswrapper[5016]: I1211 10:42:24.107655 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" event={"ID":"5bb4a62e-abe6-47a8-9823-3e1d552e451d","Type":"ContainerStarted","Data":"050967273c4ec55f3267a36824bf7b2efadc85e576730d7164e1329f1ce279f9"} Dec 11 10:42:24 crc kubenswrapper[5016]: I1211 10:42:24.109870 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" event={"ID":"da03e425-8590-4819-8996-f17dd213a3ab","Type":"ContainerStarted","Data":"7d9688579515373dedd966f0178dcd201cad8ea7a1933af3bd0666a8cece53ca"} Dec 11 10:42:24 crc kubenswrapper[5016]: I1211 10:42:24.109933 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:24 crc kubenswrapper[5016]: I1211 10:42:24.118588 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" Dec 11 10:42:24 crc kubenswrapper[5016]: I1211 10:42:24.130505 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" podStartSLOduration=2.130482487 podStartE2EDuration="2.130482487s" podCreationTimestamp="2025-12-11 10:42:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:42:24.125169614 +0000 UTC m=+460.943729213" watchObservedRunningTime="2025-12-11 10:42:24.130482487 +0000 UTC m=+460.949042066" Dec 11 10:42:24 crc kubenswrapper[5016]: I1211 10:42:24.151301 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5f5c5bf969-2gxgc" podStartSLOduration=4.151272698 podStartE2EDuration="4.151272698s" podCreationTimestamp="2025-12-11 10:42:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:42:24.146775965 +0000 UTC m=+460.965335574" watchObservedRunningTime="2025-12-11 10:42:24.151272698 +0000 UTC m=+460.969832277" Dec 11 10:42:24 crc kubenswrapper[5016]: I1211 10:42:24.165347 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 10:42:24 crc kubenswrapper[5016]: I1211 10:42:24.892156 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:24 crc kubenswrapper[5016]: I1211 10:42:24.892760 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:24 crc kubenswrapper[5016]: I1211 10:42:24.939092 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:25 crc kubenswrapper[5016]: I1211 10:42:25.161790 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-khfpb" Dec 11 10:42:25 crc kubenswrapper[5016]: I1211 10:42:25.956795 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:25 crc kubenswrapper[5016]: I1211 10:42:25.956861 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:27 crc kubenswrapper[5016]: I1211 10:42:27.000046 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6tbch" podUID="801d4e82-9cd5-4795-9363-b4eca6f2189e" containerName="registry-server" probeResult="failure" output=< Dec 11 10:42:27 crc kubenswrapper[5016]: timeout: failed to connect service ":50051" within 1s Dec 11 10:42:27 crc kubenswrapper[5016]: > Dec 11 10:42:36 crc kubenswrapper[5016]: I1211 10:42:36.008457 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:36 crc kubenswrapper[5016]: I1211 10:42:36.051453 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6tbch" Dec 11 10:42:43 crc kubenswrapper[5016]: I1211 10:42:43.359058 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-jbjmk" Dec 11 10:42:43 crc kubenswrapper[5016]: I1211 10:42:43.418204 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-4cp4w"] Dec 
11 10:43:08 crc kubenswrapper[5016]: I1211 10:43:08.457045 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" podUID="84d62237-3910-4eeb-845d-2d9c3c5a8d97" containerName="registry" containerID="cri-o://be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0" gracePeriod=30 Dec 11 10:43:08 crc kubenswrapper[5016]: I1211 10:43:08.853505 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.006575 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/84d62237-3910-4eeb-845d-2d9c3c5a8d97-registry-certificates\") pod \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.006675 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/84d62237-3910-4eeb-845d-2d9c3c5a8d97-installation-pull-secrets\") pod \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.006766 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/84d62237-3910-4eeb-845d-2d9c3c5a8d97-ca-trust-extracted\") pod \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.006796 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/84d62237-3910-4eeb-845d-2d9c3c5a8d97-trusted-ca\") pod \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.006913 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzprh\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-kube-api-access-jzprh\") pod \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.008368 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84d62237-3910-4eeb-845d-2d9c3c5a8d97-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "84d62237-3910-4eeb-845d-2d9c3c5a8d97" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.008485 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-registry-tls\") pod \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.008459 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84d62237-3910-4eeb-845d-2d9c3c5a8d97-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "84d62237-3910-4eeb-845d-2d9c3c5a8d97" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97"). 
InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.009043 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.009078 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-bound-sa-token\") pod \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\" (UID: \"84d62237-3910-4eeb-845d-2d9c3c5a8d97\") " Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.009419 5016 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/84d62237-3910-4eeb-845d-2d9c3c5a8d97-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.009437 5016 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/84d62237-3910-4eeb-845d-2d9c3c5a8d97-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.016505 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "84d62237-3910-4eeb-845d-2d9c3c5a8d97" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.016962 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84d62237-3910-4eeb-845d-2d9c3c5a8d97-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "84d62237-3910-4eeb-845d-2d9c3c5a8d97" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.022461 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-kube-api-access-jzprh" (OuterVolumeSpecName: "kube-api-access-jzprh") pod "84d62237-3910-4eeb-845d-2d9c3c5a8d97" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97"). InnerVolumeSpecName "kube-api-access-jzprh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.026963 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "84d62237-3910-4eeb-845d-2d9c3c5a8d97" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.028765 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "84d62237-3910-4eeb-845d-2d9c3c5a8d97" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.044663 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84d62237-3910-4eeb-845d-2d9c3c5a8d97-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "84d62237-3910-4eeb-845d-2d9c3c5a8d97" (UID: "84d62237-3910-4eeb-845d-2d9c3c5a8d97"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.111197 5016 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/84d62237-3910-4eeb-845d-2d9c3c5a8d97-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.111275 5016 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/84d62237-3910-4eeb-845d-2d9c3c5a8d97-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.111303 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzprh\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-kube-api-access-jzprh\") on node \"crc\" DevicePath \"\"" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.111331 5016 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.111351 5016 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/84d62237-3910-4eeb-845d-2d9c3c5a8d97-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.387044 5016 generic.go:334] "Generic (PLEG): container finished" podID="84d62237-3910-4eeb-845d-2d9c3c5a8d97" containerID="be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0" exitCode=0 Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.387134 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.387180 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" event={"ID":"84d62237-3910-4eeb-845d-2d9c3c5a8d97","Type":"ContainerDied","Data":"be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0"} Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.387931 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-4cp4w" event={"ID":"84d62237-3910-4eeb-845d-2d9c3c5a8d97","Type":"ContainerDied","Data":"ecd64465dc43b912604fe9897f9ea3b0a36cec2ccaa959ec1801c434e3e60b9e"} Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.388041 5016 scope.go:117] "RemoveContainer" containerID="be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.415542 5016 scope.go:117] "RemoveContainer" containerID="be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0" Dec 11 10:43:09 crc kubenswrapper[5016]: E1211 10:43:09.416301 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0\": container with ID starting with be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0 not found: ID does not exist" containerID="be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.416361 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0"} err="failed to get container status \"be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0\": rpc error: code = NotFound desc = could not find container \"be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0\": container with ID starting with be0d69944fa47378174281f38d9a2b59876f1a1840c2048d23cb934d2ce98fb0 not found: ID does not exist" Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.432132 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-4cp4w"] Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.436198 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-4cp4w"] Dec 11 10:43:09 crc kubenswrapper[5016]: I1211 10:43:09.482676 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84d62237-3910-4eeb-845d-2d9c3c5a8d97" path="/var/lib/kubelet/pods/84d62237-3910-4eeb-845d-2d9c3c5a8d97/volumes" Dec 11 10:44:42 crc kubenswrapper[5016]: I1211 10:44:42.934117 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:44:42 crc kubenswrapper[5016]: I1211 10:44:42.934527 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:45:00 crc 
kubenswrapper[5016]: I1211 10:45:00.189762 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t"] Dec 11 10:45:00 crc kubenswrapper[5016]: E1211 10:45:00.190589 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84d62237-3910-4eeb-845d-2d9c3c5a8d97" containerName="registry" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.190604 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="84d62237-3910-4eeb-845d-2d9c3c5a8d97" containerName="registry" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.190739 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="84d62237-3910-4eeb-845d-2d9c3c5a8d97" containerName="registry" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.191255 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.193703 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.194299 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.206453 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t"] Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.360048 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f1c3e35-cef5-4190-a349-b4490f3fe796-secret-volume\") pod \"collect-profiles-29424165-qm22t\" (UID: \"2f1c3e35-cef5-4190-a349-b4490f3fe796\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.360181 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwgk7\" (UniqueName: \"kubernetes.io/projected/2f1c3e35-cef5-4190-a349-b4490f3fe796-kube-api-access-nwgk7\") pod \"collect-profiles-29424165-qm22t\" (UID: \"2f1c3e35-cef5-4190-a349-b4490f3fe796\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.360239 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f1c3e35-cef5-4190-a349-b4490f3fe796-config-volume\") pod \"collect-profiles-29424165-qm22t\" (UID: \"2f1c3e35-cef5-4190-a349-b4490f3fe796\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.461608 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f1c3e35-cef5-4190-a349-b4490f3fe796-secret-volume\") pod \"collect-profiles-29424165-qm22t\" (UID: \"2f1c3e35-cef5-4190-a349-b4490f3fe796\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.461736 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwgk7\" (UniqueName: 
\"kubernetes.io/projected/2f1c3e35-cef5-4190-a349-b4490f3fe796-kube-api-access-nwgk7\") pod \"collect-profiles-29424165-qm22t\" (UID: \"2f1c3e35-cef5-4190-a349-b4490f3fe796\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.461813 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f1c3e35-cef5-4190-a349-b4490f3fe796-config-volume\") pod \"collect-profiles-29424165-qm22t\" (UID: \"2f1c3e35-cef5-4190-a349-b4490f3fe796\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.463491 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f1c3e35-cef5-4190-a349-b4490f3fe796-config-volume\") pod \"collect-profiles-29424165-qm22t\" (UID: \"2f1c3e35-cef5-4190-a349-b4490f3fe796\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.468171 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f1c3e35-cef5-4190-a349-b4490f3fe796-secret-volume\") pod \"collect-profiles-29424165-qm22t\" (UID: \"2f1c3e35-cef5-4190-a349-b4490f3fe796\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.487652 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwgk7\" (UniqueName: \"kubernetes.io/projected/2f1c3e35-cef5-4190-a349-b4490f3fe796-kube-api-access-nwgk7\") pod \"collect-profiles-29424165-qm22t\" (UID: \"2f1c3e35-cef5-4190-a349-b4490f3fe796\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.513196 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:00 crc kubenswrapper[5016]: I1211 10:45:00.903667 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t"] Dec 11 10:45:01 crc kubenswrapper[5016]: I1211 10:45:01.069138 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" event={"ID":"2f1c3e35-cef5-4190-a349-b4490f3fe796","Type":"ContainerStarted","Data":"5061c8763451152d1bc29ed203eabec8cfe0d435a6871967b7f8ad36107e9d68"} Dec 11 10:45:02 crc kubenswrapper[5016]: I1211 10:45:02.078067 5016 generic.go:334] "Generic (PLEG): container finished" podID="2f1c3e35-cef5-4190-a349-b4490f3fe796" containerID="d119c1b272397ca659d78902d5b3912b25e8ec6137dc801785742cdead21bfbf" exitCode=0 Dec 11 10:45:02 crc kubenswrapper[5016]: I1211 10:45:02.078329 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" event={"ID":"2f1c3e35-cef5-4190-a349-b4490f3fe796","Type":"ContainerDied","Data":"d119c1b272397ca659d78902d5b3912b25e8ec6137dc801785742cdead21bfbf"} Dec 11 10:45:03 crc kubenswrapper[5016]: I1211 10:45:03.408461 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:03 crc kubenswrapper[5016]: I1211 10:45:03.602106 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f1c3e35-cef5-4190-a349-b4490f3fe796-secret-volume\") pod \"2f1c3e35-cef5-4190-a349-b4490f3fe796\" (UID: \"2f1c3e35-cef5-4190-a349-b4490f3fe796\") " Dec 11 10:45:03 crc kubenswrapper[5016]: I1211 10:45:03.602220 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwgk7\" (UniqueName: \"kubernetes.io/projected/2f1c3e35-cef5-4190-a349-b4490f3fe796-kube-api-access-nwgk7\") pod \"2f1c3e35-cef5-4190-a349-b4490f3fe796\" (UID: \"2f1c3e35-cef5-4190-a349-b4490f3fe796\") " Dec 11 10:45:03 crc kubenswrapper[5016]: I1211 10:45:03.602261 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f1c3e35-cef5-4190-a349-b4490f3fe796-config-volume\") pod \"2f1c3e35-cef5-4190-a349-b4490f3fe796\" (UID: \"2f1c3e35-cef5-4190-a349-b4490f3fe796\") " Dec 11 10:45:03 crc kubenswrapper[5016]: I1211 10:45:03.603135 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f1c3e35-cef5-4190-a349-b4490f3fe796-config-volume" (OuterVolumeSpecName: "config-volume") pod "2f1c3e35-cef5-4190-a349-b4490f3fe796" (UID: "2f1c3e35-cef5-4190-a349-b4490f3fe796"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:45:03 crc kubenswrapper[5016]: I1211 10:45:03.609219 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f1c3e35-cef5-4190-a349-b4490f3fe796-kube-api-access-nwgk7" (OuterVolumeSpecName: "kube-api-access-nwgk7") pod "2f1c3e35-cef5-4190-a349-b4490f3fe796" (UID: "2f1c3e35-cef5-4190-a349-b4490f3fe796"). InnerVolumeSpecName "kube-api-access-nwgk7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:45:03 crc kubenswrapper[5016]: I1211 10:45:03.609687 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f1c3e35-cef5-4190-a349-b4490f3fe796-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2f1c3e35-cef5-4190-a349-b4490f3fe796" (UID: "2f1c3e35-cef5-4190-a349-b4490f3fe796"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:45:03 crc kubenswrapper[5016]: I1211 10:45:03.703792 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwgk7\" (UniqueName: \"kubernetes.io/projected/2f1c3e35-cef5-4190-a349-b4490f3fe796-kube-api-access-nwgk7\") on node \"crc\" DevicePath \"\"" Dec 11 10:45:03 crc kubenswrapper[5016]: I1211 10:45:03.703848 5016 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f1c3e35-cef5-4190-a349-b4490f3fe796-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 10:45:03 crc kubenswrapper[5016]: I1211 10:45:03.703860 5016 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f1c3e35-cef5-4190-a349-b4490f3fe796-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 11 10:45:04 crc kubenswrapper[5016]: I1211 10:45:04.090122 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" event={"ID":"2f1c3e35-cef5-4190-a349-b4490f3fe796","Type":"ContainerDied","Data":"5061c8763451152d1bc29ed203eabec8cfe0d435a6871967b7f8ad36107e9d68"} Dec 11 10:45:04 crc kubenswrapper[5016]: I1211 10:45:04.090161 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5061c8763451152d1bc29ed203eabec8cfe0d435a6871967b7f8ad36107e9d68" Dec 11 10:45:04 crc kubenswrapper[5016]: I1211 10:45:04.090172 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t" Dec 11 10:45:12 crc kubenswrapper[5016]: I1211 10:45:12.932766 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:45:12 crc kubenswrapper[5016]: I1211 10:45:12.933640 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:45:42 crc kubenswrapper[5016]: I1211 10:45:42.933513 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:45:42 crc kubenswrapper[5016]: I1211 10:45:42.934086 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:45:42 crc kubenswrapper[5016]: I1211 10:45:42.934155 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 10:45:42 crc kubenswrapper[5016]: I1211 10:45:42.935007 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"61d3252a2d684020f84ac3017dc378ce04486bdbb0fae848d8fd9fafa07cbdba"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 10:45:42 crc kubenswrapper[5016]: I1211 10:45:42.935083 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://61d3252a2d684020f84ac3017dc378ce04486bdbb0fae848d8fd9fafa07cbdba" gracePeriod=600 Dec 11 10:45:43 crc kubenswrapper[5016]: E1211 10:45:43.043749 5016 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode679c083_2480_4bc8_a8ea_dc2ff0412508.slice/crio-conmon-61d3252a2d684020f84ac3017dc378ce04486bdbb0fae848d8fd9fafa07cbdba.scope\": RecentStats: unable to find data in memory cache]" Dec 11 10:45:43 crc kubenswrapper[5016]: I1211 10:45:43.326511 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="61d3252a2d684020f84ac3017dc378ce04486bdbb0fae848d8fd9fafa07cbdba" exitCode=0 Dec 11 10:45:43 crc kubenswrapper[5016]: I1211 10:45:43.326551 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"61d3252a2d684020f84ac3017dc378ce04486bdbb0fae848d8fd9fafa07cbdba"} Dec 11 10:45:43 crc kubenswrapper[5016]: I1211 10:45:43.326874 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"5b248f167297cc4b041da14181a2d07d9de5add0be6fdd5562f37c434da09668"} Dec 11 10:45:43 crc kubenswrapper[5016]: I1211 10:45:43.326899 5016 scope.go:117] "RemoveContainer" containerID="793991a7e6d358bd8fbd2f0bae8254371015f24f8ff9bca5c69c392121b0afd1" Dec 11 10:45:58 crc kubenswrapper[5016]: I1211 10:45:58.762300 5016 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 11 10:48:12 crc kubenswrapper[5016]: I1211 10:48:12.932688 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:48:12 crc kubenswrapper[5016]: I1211 10:48:12.933683 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.533098 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-dlzf6"] Dec 11 10:48:20 crc kubenswrapper[5016]: E1211 10:48:20.534227 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f1c3e35-cef5-4190-a349-b4490f3fe796" containerName="collect-profiles" Dec 11 10:48:20 crc kubenswrapper[5016]: 
I1211 10:48:20.534252 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f1c3e35-cef5-4190-a349-b4490f3fe796" containerName="collect-profiles" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.534464 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f1c3e35-cef5-4190-a349-b4490f3fe796" containerName="collect-profiles" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.535123 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-dlzf6" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.538324 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.539559 5016 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-s44d2" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.539876 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.554077 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-dlzf6"] Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.566790 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-sm8ss"] Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.569485 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-sm8ss" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.571217 5016 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-vv42k" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.587663 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-sm8ss"] Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.603877 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-8pgz5"] Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.604956 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-8pgz5" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.608545 5016 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-npsnb" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.622244 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjbq7\" (UniqueName: \"kubernetes.io/projected/058f486a-6a97-4bc7-9e43-65af0e4b5634-kube-api-access-sjbq7\") pod \"cert-manager-cainjector-7f985d654d-dlzf6\" (UID: \"058f486a-6a97-4bc7-9e43-65af0e4b5634\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-dlzf6" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.622329 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bmr2\" (UniqueName: \"kubernetes.io/projected/f63479f5-0af6-4622-85fb-42bcfb115692-kube-api-access-6bmr2\") pod \"cert-manager-5b446d88c5-sm8ss\" (UID: \"f63479f5-0af6-4622-85fb-42bcfb115692\") " pod="cert-manager/cert-manager-5b446d88c5-sm8ss" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.622374 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpsvg\" (UniqueName: \"kubernetes.io/projected/e7d758da-34fb-4507-83f4-1e5f948d9249-kube-api-access-rpsvg\") pod \"cert-manager-webhook-5655c58dd6-8pgz5\" (UID: \"e7d758da-34fb-4507-83f4-1e5f948d9249\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-8pgz5" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.624197 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-8pgz5"] Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.723543 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjbq7\" (UniqueName: \"kubernetes.io/projected/058f486a-6a97-4bc7-9e43-65af0e4b5634-kube-api-access-sjbq7\") pod \"cert-manager-cainjector-7f985d654d-dlzf6\" (UID: \"058f486a-6a97-4bc7-9e43-65af0e4b5634\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-dlzf6" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.723595 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bmr2\" (UniqueName: \"kubernetes.io/projected/f63479f5-0af6-4622-85fb-42bcfb115692-kube-api-access-6bmr2\") pod \"cert-manager-5b446d88c5-sm8ss\" (UID: \"f63479f5-0af6-4622-85fb-42bcfb115692\") " pod="cert-manager/cert-manager-5b446d88c5-sm8ss" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.723631 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpsvg\" (UniqueName: \"kubernetes.io/projected/e7d758da-34fb-4507-83f4-1e5f948d9249-kube-api-access-rpsvg\") pod \"cert-manager-webhook-5655c58dd6-8pgz5\" (UID: \"e7d758da-34fb-4507-83f4-1e5f948d9249\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-8pgz5" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.745580 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjbq7\" (UniqueName: \"kubernetes.io/projected/058f486a-6a97-4bc7-9e43-65af0e4b5634-kube-api-access-sjbq7\") pod \"cert-manager-cainjector-7f985d654d-dlzf6\" (UID: \"058f486a-6a97-4bc7-9e43-65af0e4b5634\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-dlzf6" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.749583 5016 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6bmr2\" (UniqueName: \"kubernetes.io/projected/f63479f5-0af6-4622-85fb-42bcfb115692-kube-api-access-6bmr2\") pod \"cert-manager-5b446d88c5-sm8ss\" (UID: \"f63479f5-0af6-4622-85fb-42bcfb115692\") " pod="cert-manager/cert-manager-5b446d88c5-sm8ss" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.750157 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpsvg\" (UniqueName: \"kubernetes.io/projected/e7d758da-34fb-4507-83f4-1e5f948d9249-kube-api-access-rpsvg\") pod \"cert-manager-webhook-5655c58dd6-8pgz5\" (UID: \"e7d758da-34fb-4507-83f4-1e5f948d9249\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-8pgz5" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.851908 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-dlzf6" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.885889 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-sm8ss" Dec 11 10:48:20 crc kubenswrapper[5016]: I1211 10:48:20.923592 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-8pgz5" Dec 11 10:48:21 crc kubenswrapper[5016]: I1211 10:48:21.119905 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-dlzf6"] Dec 11 10:48:21 crc kubenswrapper[5016]: I1211 10:48:21.138749 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 10:48:21 crc kubenswrapper[5016]: I1211 10:48:21.180232 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-sm8ss"] Dec 11 10:48:21 crc kubenswrapper[5016]: W1211 10:48:21.184111 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf63479f5_0af6_4622_85fb_42bcfb115692.slice/crio-23f3f12184be9703380871e472e10ec7b20b1096f49cc3ab1e14c0d26dd70d8f WatchSource:0}: Error finding container 23f3f12184be9703380871e472e10ec7b20b1096f49cc3ab1e14c0d26dd70d8f: Status 404 returned error can't find the container with id 23f3f12184be9703380871e472e10ec7b20b1096f49cc3ab1e14c0d26dd70d8f Dec 11 10:48:21 crc kubenswrapper[5016]: I1211 10:48:21.236517 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-8pgz5"] Dec 11 10:48:21 crc kubenswrapper[5016]: W1211 10:48:21.240500 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7d758da_34fb_4507_83f4_1e5f948d9249.slice/crio-b624a70d932373306ea643c4c7fe55061eb38b3b2936fc3306ea7d8e5faf45cb WatchSource:0}: Error finding container b624a70d932373306ea643c4c7fe55061eb38b3b2936fc3306ea7d8e5faf45cb: Status 404 returned error can't find the container with id b624a70d932373306ea643c4c7fe55061eb38b3b2936fc3306ea7d8e5faf45cb Dec 11 10:48:21 crc kubenswrapper[5016]: I1211 10:48:21.365833 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-dlzf6" event={"ID":"058f486a-6a97-4bc7-9e43-65af0e4b5634","Type":"ContainerStarted","Data":"ce781a6bbd2e0571684a901aa3079f416b981132be8e6c62856fd17a172bbef6"} Dec 11 10:48:21 crc kubenswrapper[5016]: I1211 10:48:21.368728 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="cert-manager/cert-manager-webhook-5655c58dd6-8pgz5" event={"ID":"e7d758da-34fb-4507-83f4-1e5f948d9249","Type":"ContainerStarted","Data":"b624a70d932373306ea643c4c7fe55061eb38b3b2936fc3306ea7d8e5faf45cb"} Dec 11 10:48:21 crc kubenswrapper[5016]: I1211 10:48:21.370308 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-sm8ss" event={"ID":"f63479f5-0af6-4622-85fb-42bcfb115692","Type":"ContainerStarted","Data":"23f3f12184be9703380871e472e10ec7b20b1096f49cc3ab1e14c0d26dd70d8f"} Dec 11 10:48:23 crc kubenswrapper[5016]: I1211 10:48:23.383601 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-dlzf6" event={"ID":"058f486a-6a97-4bc7-9e43-65af0e4b5634","Type":"ContainerStarted","Data":"d58ee87c94ab311f67bdbe0195fd8d68f47a3e1a88fddf963092c6fd6a53ce68"} Dec 11 10:48:23 crc kubenswrapper[5016]: I1211 10:48:23.406917 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-dlzf6" podStartSLOduration=1.304563525 podStartE2EDuration="3.406887967s" podCreationTimestamp="2025-12-11 10:48:20 +0000 UTC" firstStartedPulling="2025-12-11 10:48:21.138395704 +0000 UTC m=+817.956955283" lastFinishedPulling="2025-12-11 10:48:23.240720146 +0000 UTC m=+820.059279725" observedRunningTime="2025-12-11 10:48:23.401173855 +0000 UTC m=+820.219733434" watchObservedRunningTime="2025-12-11 10:48:23.406887967 +0000 UTC m=+820.225447546" Dec 11 10:48:25 crc kubenswrapper[5016]: I1211 10:48:25.397909 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-8pgz5" event={"ID":"e7d758da-34fb-4507-83f4-1e5f948d9249","Type":"ContainerStarted","Data":"3fdfe84a8128b831ff5d250410d380e945e30d7f5f5581a3be851ddc8685813f"} Dec 11 10:48:25 crc kubenswrapper[5016]: I1211 10:48:25.398535 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-8pgz5" Dec 11 10:48:25 crc kubenswrapper[5016]: I1211 10:48:25.400506 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-sm8ss" event={"ID":"f63479f5-0af6-4622-85fb-42bcfb115692","Type":"ContainerStarted","Data":"e50ffdf72409d2d3b04a3a8aa4cdacd3933ff9cb0af6092e206551187b800162"} Dec 11 10:48:25 crc kubenswrapper[5016]: I1211 10:48:25.421288 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-8pgz5" podStartSLOduration=1.551364419 podStartE2EDuration="5.421264154s" podCreationTimestamp="2025-12-11 10:48:20 +0000 UTC" firstStartedPulling="2025-12-11 10:48:21.244506239 +0000 UTC m=+818.063065818" lastFinishedPulling="2025-12-11 10:48:25.114405974 +0000 UTC m=+821.932965553" observedRunningTime="2025-12-11 10:48:25.414976827 +0000 UTC m=+822.233536426" watchObservedRunningTime="2025-12-11 10:48:25.421264154 +0000 UTC m=+822.239823743" Dec 11 10:48:25 crc kubenswrapper[5016]: I1211 10:48:25.434567 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-sm8ss" podStartSLOduration=1.530729569 podStartE2EDuration="5.434548252s" podCreationTimestamp="2025-12-11 10:48:20 +0000 UTC" firstStartedPulling="2025-12-11 10:48:21.186431522 +0000 UTC m=+818.004991101" lastFinishedPulling="2025-12-11 10:48:25.090250195 +0000 UTC m=+821.908809784" observedRunningTime="2025-12-11 10:48:25.432633855 +0000 UTC m=+822.251193454" watchObservedRunningTime="2025-12-11 
10:48:25.434548252 +0000 UTC m=+822.253107851" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.364090 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-7m8vj"] Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.365399 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="ovn-controller" containerID="cri-o://6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7" gracePeriod=30 Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.365448 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="northd" containerID="cri-o://91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e" gracePeriod=30 Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.365640 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="sbdb" containerID="cri-o://9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b" gracePeriod=30 Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.365701 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="nbdb" containerID="cri-o://887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2" gracePeriod=30 Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.365793 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="kube-rbac-proxy-node" containerID="cri-o://cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b" gracePeriod=30 Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.365835 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3" gracePeriod=30 Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.365830 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="ovn-acl-logging" containerID="cri-o://627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653" gracePeriod=30 Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.422127 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="ovnkube-controller" containerID="cri-o://36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b" gracePeriod=30 Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.649275 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7m8vj_1a090784-1b4b-4c21-b425-9ea90576fc74/ovn-acl-logging/0.log" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.650432 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7m8vj_1a090784-1b4b-4c21-b425-9ea90576fc74/ovn-controller/0.log" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.650973 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675106 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-ovnkube-config\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675172 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-node-log\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675191 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-ovn\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675210 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-var-lib-cni-networks-ovn-kubernetes\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675230 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-systemd\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675246 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-slash\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675300 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-run-netns\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675350 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-var-lib-openvswitch\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675371 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-systemd-units\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc 
kubenswrapper[5016]: I1211 10:48:30.675391 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-log-socket\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675408 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-cni-netd\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675435 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-openvswitch\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675460 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1a090784-1b4b-4c21-b425-9ea90576fc74-ovn-node-metrics-cert\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675495 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-run-ovn-kubernetes\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675517 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-env-overrides\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675522 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-slash" (OuterVolumeSpecName: "host-slash") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675660 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-ovnkube-script-lib\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675680 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-etc-openvswitch\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675699 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-kubelet\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675734 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hw9x\" (UniqueName: \"kubernetes.io/projected/1a090784-1b4b-4c21-b425-9ea90576fc74-kube-api-access-6hw9x\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.675753 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-cni-bin\") pod \"1a090784-1b4b-4c21-b425-9ea90576fc74\" (UID: \"1a090784-1b4b-4c21-b425-9ea90576fc74\") " Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.676159 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.676204 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-node-log" (OuterVolumeSpecName: "node-log") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.676228 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.676249 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). 
InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.676379 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.676505 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.676656 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.676754 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-log-socket" (OuterVolumeSpecName: "log-socket") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.676843 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.676917 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.677100 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.677108 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.677189 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.677239 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.677671 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.677967 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.687769 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a090784-1b4b-4c21-b425-9ea90576fc74-kube-api-access-6hw9x" (OuterVolumeSpecName: "kube-api-access-6hw9x") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "kube-api-access-6hw9x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.690612 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a090784-1b4b-4c21-b425-9ea90576fc74-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.696597 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "1a090784-1b4b-4c21-b425-9ea90576fc74" (UID: "1a090784-1b4b-4c21-b425-9ea90576fc74"). 
InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.714088 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-kr8s2"] Dec 11 10:48:30 crc kubenswrapper[5016]: E1211 10:48:30.714577 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="kubecfg-setup" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.714606 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="kubecfg-setup" Dec 11 10:48:30 crc kubenswrapper[5016]: E1211 10:48:30.714627 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="ovn-controller" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.714637 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="ovn-controller" Dec 11 10:48:30 crc kubenswrapper[5016]: E1211 10:48:30.714665 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="ovnkube-controller" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.714676 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="ovnkube-controller" Dec 11 10:48:30 crc kubenswrapper[5016]: E1211 10:48:30.714695 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="ovn-acl-logging" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.714704 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="ovn-acl-logging" Dec 11 10:48:30 crc kubenswrapper[5016]: E1211 10:48:30.714727 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="kube-rbac-proxy-node" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.714735 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="kube-rbac-proxy-node" Dec 11 10:48:30 crc kubenswrapper[5016]: E1211 10:48:30.714750 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="kube-rbac-proxy-ovn-metrics" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.714759 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="kube-rbac-proxy-ovn-metrics" Dec 11 10:48:30 crc kubenswrapper[5016]: E1211 10:48:30.714801 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="sbdb" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.714814 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="sbdb" Dec 11 10:48:30 crc kubenswrapper[5016]: E1211 10:48:30.714828 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="northd" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.714837 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="northd" Dec 11 10:48:30 crc kubenswrapper[5016]: E1211 10:48:30.714847 5016 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="nbdb" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.714855 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="nbdb" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.715039 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="kube-rbac-proxy-node" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.715058 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="kube-rbac-proxy-ovn-metrics" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.715067 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="ovnkube-controller" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.715083 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="northd" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.715097 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="nbdb" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.715112 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="ovn-acl-logging" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.715122 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="sbdb" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.715131 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerName="ovn-controller" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.717394 5016 util.go:30] "No sandbox for pod can be found. 
Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.717394 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2"
Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.776807 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-var-lib-openvswitch\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2"
Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.776862 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/efeebdb5-73f2-4a48-ac43-60f86f40b619-ovn-node-metrics-cert\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2"
Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.776882 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/efeebdb5-73f2-4a48-ac43-60f86f40b619-env-overrides\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2"
Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.776904 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-slash\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2"
Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777145 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-log-socket\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2"
Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777208 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-run-ovn\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2"
Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777248 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-node-log\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2"
Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777277 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-run-ovn-kubernetes\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2"
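From here the reconciler drives every volume of the new pod through the same three steps that follow in the log: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded. A compressed sketch of that desired-state-versus-actual-state loop, with invented types standing in for reconciler_common.go's real ones:

package main

import "fmt"

// reconcile walks the desired volumes for a pod and drives each one to the
// mounted state, printing the same three phases the kubelet logs.
func reconcile(pod string, desired []string, mounted map[string]bool) {
	for _, name := range desired {
		fmt.Printf("operationExecutor.VerifyControllerAttachedVolume started for volume %q pod %q\n", name, pod)
		if mounted[name] {
			continue // already in the actual state of the world
		}
		fmt.Printf("operationExecutor.MountVolume started for volume %q pod %q\n", name, pod)
		mounted[name] = true // stands in for the volume plugin's SetUp call
		fmt.Printf("MountVolume.SetUp succeeded for volume %q pod %q\n", name, pod)
	}
}

func main() {
	reconcile("ovnkube-node-kr8s2",
		[]string{"var-lib-openvswitch", "ovn-node-metrics-cert", "env-overrides"},
		map[string]bool{})
}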
\"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-systemd-units\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777509 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-cni-netd\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777604 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-kubelet\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777634 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-run-netns\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777726 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/efeebdb5-73f2-4a48-ac43-60f86f40b619-ovnkube-script-lib\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777788 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-cni-bin\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777816 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-etc-openvswitch\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777845 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777868 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dfj5\" (UniqueName: \"kubernetes.io/projected/efeebdb5-73f2-4a48-ac43-60f86f40b619-kube-api-access-2dfj5\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 
10:48:30.777895 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-run-openvswitch\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777913 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/efeebdb5-73f2-4a48-ac43-60f86f40b619-ovnkube-config\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.777933 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-run-systemd\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778002 5016 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-run-netns\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778015 5016 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-systemd-units\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778025 5016 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778034 5016 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-log-socket\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778043 5016 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-cni-netd\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778053 5016 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778063 5016 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1a090784-1b4b-4c21-b425-9ea90576fc74-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778074 5016 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778083 5016 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778093 5016 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-kubelet\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778102 5016 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778111 5016 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778121 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hw9x\" (UniqueName: \"kubernetes.io/projected/1a090784-1b4b-4c21-b425-9ea90576fc74-kube-api-access-6hw9x\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778131 5016 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-cni-bin\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778141 5016 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1a090784-1b4b-4c21-b425-9ea90576fc74-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778151 5016 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-node-log\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778163 5016 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778173 5016 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778183 5016 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-host-slash\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.778193 5016 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1a090784-1b4b-4c21-b425-9ea90576fc74-run-systemd\") on node \"crc\" DevicePath \"\"" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.880517 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-cni-bin\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.880605 5016 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-etc-openvswitch\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.880660 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.880703 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dfj5\" (UniqueName: \"kubernetes.io/projected/efeebdb5-73f2-4a48-ac43-60f86f40b619-kube-api-access-2dfj5\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.880754 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-run-openvswitch\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.880793 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/efeebdb5-73f2-4a48-ac43-60f86f40b619-ovnkube-config\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.880828 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-run-systemd\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.880859 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-etc-openvswitch\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.880892 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-var-lib-openvswitch\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.880987 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-run-systemd\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881026 5016 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/efeebdb5-73f2-4a48-ac43-60f86f40b619-env-overrides\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881178 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/efeebdb5-73f2-4a48-ac43-60f86f40b619-ovn-node-metrics-cert\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881184 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-run-openvswitch\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.880859 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-cni-bin\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881301 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-slash\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881345 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-slash\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881196 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-var-lib-openvswitch\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881373 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-log-socket\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881402 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-log-socket\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881438 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-run-ovn\") pod 
\"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881485 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-node-log\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881575 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-run-ovn-kubernetes\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881608 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-node-log\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881671 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/efeebdb5-73f2-4a48-ac43-60f86f40b619-env-overrides\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881682 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-systemd-units\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881647 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-run-ovn-kubernetes\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881575 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-run-ovn\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881745 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-cni-netd\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881768 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881846 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-systemd-units\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881873 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-cni-netd\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.881997 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-kubelet\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.882057 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-run-netns\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.882102 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-kubelet\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.882172 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/efeebdb5-73f2-4a48-ac43-60f86f40b619-host-run-netns\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.882213 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/efeebdb5-73f2-4a48-ac43-60f86f40b619-ovnkube-script-lib\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.882624 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/efeebdb5-73f2-4a48-ac43-60f86f40b619-ovnkube-config\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.883466 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/efeebdb5-73f2-4a48-ac43-60f86f40b619-ovnkube-script-lib\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.885444 5016 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/efeebdb5-73f2-4a48-ac43-60f86f40b619-ovn-node-metrics-cert\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2"
Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.912114 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dfj5\" (UniqueName: \"kubernetes.io/projected/efeebdb5-73f2-4a48-ac43-60f86f40b619-kube-api-access-2dfj5\") pod \"ovnkube-node-kr8s2\" (UID: \"efeebdb5-73f2-4a48-ac43-60f86f40b619\") " pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2"
Dec 11 10:48:30 crc kubenswrapper[5016]: I1211 10:48:30.926403 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-8pgz5"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.038875 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.446134 5016 generic.go:334] "Generic (PLEG): container finished" podID="efeebdb5-73f2-4a48-ac43-60f86f40b619" containerID="fc4bb35788678cb38a1d1e0c5d3293c28c77432ad9c9e1fa2c02076753c82b94" exitCode=0
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.446178 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" event={"ID":"efeebdb5-73f2-4a48-ac43-60f86f40b619","Type":"ContainerDied","Data":"fc4bb35788678cb38a1d1e0c5d3293c28c77432ad9c9e1fa2c02076753c82b94"}
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.447551 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" event={"ID":"efeebdb5-73f2-4a48-ac43-60f86f40b619","Type":"ContainerStarted","Data":"72101d6673b27871ccf246b496a53d85b5a26de83b58f098a9732fc1ca52899d"}
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.451132 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-k9ssc_62530621-fff3-49c0-ba0d-14d7ec144c5f/kube-multus/0.log"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.451229 5016 generic.go:334] "Generic (PLEG): container finished" podID="62530621-fff3-49c0-ba0d-14d7ec144c5f" containerID="5ef674cb488ca21a343bb2740538b28a8ef41adf15b2fcae0eac5a1f9439e210" exitCode=2
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.451366 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-k9ssc" event={"ID":"62530621-fff3-49c0-ba0d-14d7ec144c5f","Type":"ContainerDied","Data":"5ef674cb488ca21a343bb2740538b28a8ef41adf15b2fcae0eac5a1f9439e210"}
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.452451 5016 scope.go:117] "RemoveContainer" containerID="5ef674cb488ca21a343bb2740538b28a8ef41adf15b2fcae0eac5a1f9439e210"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.460769 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7m8vj_1a090784-1b4b-4c21-b425-9ea90576fc74/ovn-acl-logging/0.log"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.462093 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7m8vj_1a090784-1b4b-4c21-b425-9ea90576fc74/ovn-controller/0.log"
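The generic.go:334 / kubelet.go:2453 pairs above and below come from the Pod Lifecycle Event Generator (PLEG): a relist compares each container's previous state with the state the runtime reports now and converts transitions into ContainerStarted / ContainerDied events for the sync loop (here the new pod's kubecfg-setup container exiting 0, and kube-multus exiting 2). A bare-bones model of that diff, with simplified states:

package main

import "fmt"

type state int

const (
	running state = iota
	exited
)

// relist diffs old and current container states for one pod and emits the
// PLEG-style events the kubelet's sync loop consumes.
func relist(pod string, old, cur map[string]state) []string {
	var events []string
	for id, s := range cur {
		prev, seen := old[id]
		switch {
		case s == running && (!seen || prev != running):
			events = append(events, fmt.Sprintf("pod=%s ContainerStarted %s", pod, id))
		case s == exited && seen && prev == running:
			events = append(events, fmt.Sprintf("pod=%s ContainerDied %s", pod, id))
		}
	}
	return events
}

func main() {
	old := map[string]state{"fc4bb357": running}
	cur := map[string]state{"fc4bb357": exited, "72101d66": running}
	for _, e := range relist("ovnkube-node-kr8s2", old, cur) {
		fmt.Println(e)
	}
}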
podID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerID="36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b" exitCode=0 Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463267 5016 generic.go:334] "Generic (PLEG): container finished" podID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerID="9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b" exitCode=0 Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463285 5016 generic.go:334] "Generic (PLEG): container finished" podID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerID="887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2" exitCode=0 Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463302 5016 generic.go:334] "Generic (PLEG): container finished" podID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerID="91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e" exitCode=0 Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463319 5016 generic.go:334] "Generic (PLEG): container finished" podID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerID="1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3" exitCode=0 Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463333 5016 generic.go:334] "Generic (PLEG): container finished" podID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerID="cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b" exitCode=0 Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463347 5016 generic.go:334] "Generic (PLEG): container finished" podID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerID="627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653" exitCode=143 Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463366 5016 generic.go:334] "Generic (PLEG): container finished" podID="1a090784-1b4b-4c21-b425-9ea90576fc74" containerID="6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7" exitCode=143 Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463402 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerDied","Data":"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463447 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerDied","Data":"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463473 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerDied","Data":"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463498 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerDied","Data":"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463525 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerDied","Data":"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3"} Dec 11 10:48:31 crc 
kubenswrapper[5016]: I1211 10:48:31.463547 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerDied","Data":"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463569 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463587 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463600 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463616 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerDied","Data":"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463633 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463646 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463657 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463669 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463680 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463692 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463704 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463716 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463728 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463745 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerDied","Data":"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463762 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463777 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463788 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463800 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463811 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463822 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463833 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463844 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463855 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463873 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" event={"ID":"1a090784-1b4b-4c21-b425-9ea90576fc74","Type":"ContainerDied","Data":"be4c2942be4019e7feb3d1bdce1b71545316d18531027f8142994eded60efb0a"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463889 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463898 5016 scope.go:117] "RemoveContainer" containerID="36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463902 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.464140 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.464178 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.464193 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.464207 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.464221 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.464250 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.464265 5016 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae"} Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.463723 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.532308 5016 scope.go:117] "RemoveContainer" containerID="9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.560563 5016 scope.go:117] "RemoveContainer" containerID="887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.577459 5016 scope.go:117] "RemoveContainer" containerID="91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.596327 5016 scope.go:117] "RemoveContainer" containerID="1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.614479 5016 scope.go:117] "RemoveContainer" containerID="cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.639010 5016 scope.go:117] "RemoveContainer" containerID="627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.655804 5016 scope.go:117] "RemoveContainer" containerID="6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.679286 5016 scope.go:117] "RemoveContainer" containerID="5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.696134 5016 scope.go:117] "RemoveContainer" containerID="36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b"
Dec 11 10:48:31 crc kubenswrapper[5016]: E1211 10:48:31.696598 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b\": container with ID starting with 36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b not found: ID does not exist" containerID="36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.696650 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b"} err="failed to get container status \"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b\": rpc error: code = NotFound desc = could not find container \"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b\": container with ID starting with 36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b not found: ID does not exist"
Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.696689 5016 scope.go:117] "RemoveContainer" containerID="9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"
Dec 11 10:48:31 crc kubenswrapper[5016]: E1211 10:48:31.697175 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b\": container with ID starting with 9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b not found: ID does not exist" containerID="9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"
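Every RemoveContainer below is answered with a gRPC NotFound from CRI-O: these containers were already deleted along with the old sandbox, so ContainerStatus has nothing to report and the kubelet simply logs the miss and moves on, making removal effectively idempotent. The standard way to code that tolerance, using the real google.golang.org/grpc status and codes packages (removeContainer itself is a stand-in here, not the kubelet's CRI client):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer stands in for a CRI RemoveContainer RPC; like CRI-O in the
// log above, it reports the container as already gone.
func removeContainer(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q: ID does not exist", id)
}

// ensureRemoved treats NotFound as success: either way the container is gone.
func ensureRemoved(id string) error {
	if err := removeContainer(id); err != nil {
		if status.Code(err) == codes.NotFound {
			fmt.Printf("container %s already removed, nothing to do\n", id)
			return nil
		}
		return fmt.Errorf("removing container %s: %w", id, err)
	}
	return nil
}

func main() {
	if err := ensureRemoved("36a82f8588f8"); err != nil {
		fmt.Println(err)
	}
}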
containerID={"Type":"cri-o","ID":"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"} err="failed to get container status \"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b\": rpc error: code = NotFound desc = could not find container \"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b\": container with ID starting with 9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.697230 5016 scope.go:117] "RemoveContainer" containerID="887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2" Dec 11 10:48:31 crc kubenswrapper[5016]: E1211 10:48:31.697607 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2\": container with ID starting with 887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2 not found: ID does not exist" containerID="887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.697658 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2"} err="failed to get container status \"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2\": rpc error: code = NotFound desc = could not find container \"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2\": container with ID starting with 887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.697696 5016 scope.go:117] "RemoveContainer" containerID="91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e" Dec 11 10:48:31 crc kubenswrapper[5016]: E1211 10:48:31.698041 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e\": container with ID starting with 91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e not found: ID does not exist" containerID="91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.698067 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e"} err="failed to get container status \"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e\": rpc error: code = NotFound desc = could not find container \"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e\": container with ID starting with 91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.698090 5016 scope.go:117] "RemoveContainer" containerID="1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3" Dec 11 10:48:31 crc kubenswrapper[5016]: E1211 10:48:31.698359 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3\": container with ID starting with 1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3 not found: ID does not exist" 
containerID="1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.698389 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3"} err="failed to get container status \"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3\": rpc error: code = NotFound desc = could not find container \"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3\": container with ID starting with 1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.698403 5016 scope.go:117] "RemoveContainer" containerID="cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b" Dec 11 10:48:31 crc kubenswrapper[5016]: E1211 10:48:31.698647 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b\": container with ID starting with cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b not found: ID does not exist" containerID="cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.698682 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b"} err="failed to get container status \"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b\": rpc error: code = NotFound desc = could not find container \"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b\": container with ID starting with cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.698706 5016 scope.go:117] "RemoveContainer" containerID="627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653" Dec 11 10:48:31 crc kubenswrapper[5016]: E1211 10:48:31.698973 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653\": container with ID starting with 627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653 not found: ID does not exist" containerID="627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.699023 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653"} err="failed to get container status \"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653\": rpc error: code = NotFound desc = could not find container \"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653\": container with ID starting with 627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.699051 5016 scope.go:117] "RemoveContainer" containerID="6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7" Dec 11 10:48:31 crc kubenswrapper[5016]: E1211 10:48:31.699361 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7\": container with ID starting with 6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7 not found: ID does not exist" containerID="6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.699397 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7"} err="failed to get container status \"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7\": rpc error: code = NotFound desc = could not find container \"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7\": container with ID starting with 6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.699416 5016 scope.go:117] "RemoveContainer" containerID="5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae" Dec 11 10:48:31 crc kubenswrapper[5016]: E1211 10:48:31.699740 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae\": container with ID starting with 5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae not found: ID does not exist" containerID="5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.699770 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae"} err="failed to get container status \"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae\": rpc error: code = NotFound desc = could not find container \"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae\": container with ID starting with 5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.699788 5016 scope.go:117] "RemoveContainer" containerID="36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.699992 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b"} err="failed to get container status \"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b\": rpc error: code = NotFound desc = could not find container \"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b\": container with ID starting with 36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.700009 5016 scope.go:117] "RemoveContainer" containerID="9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.700231 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"} err="failed to get container status \"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b\": rpc error: code = NotFound desc = could not find container \"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b\": container with ID starting with 
9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.700254 5016 scope.go:117] "RemoveContainer" containerID="887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.700433 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2"} err="failed to get container status \"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2\": rpc error: code = NotFound desc = could not find container \"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2\": container with ID starting with 887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.700461 5016 scope.go:117] "RemoveContainer" containerID="91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.700669 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e"} err="failed to get container status \"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e\": rpc error: code = NotFound desc = could not find container \"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e\": container with ID starting with 91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.700697 5016 scope.go:117] "RemoveContainer" containerID="1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.700973 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3"} err="failed to get container status \"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3\": rpc error: code = NotFound desc = could not find container \"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3\": container with ID starting with 1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.701002 5016 scope.go:117] "RemoveContainer" containerID="cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.701219 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b"} err="failed to get container status \"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b\": rpc error: code = NotFound desc = could not find container \"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b\": container with ID starting with cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.701419 5016 scope.go:117] "RemoveContainer" containerID="627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.701693 5016 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653"} err="failed to get container status \"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653\": rpc error: code = NotFound desc = could not find container \"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653\": container with ID starting with 627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.701718 5016 scope.go:117] "RemoveContainer" containerID="6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.702008 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7"} err="failed to get container status \"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7\": rpc error: code = NotFound desc = could not find container \"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7\": container with ID starting with 6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.702042 5016 scope.go:117] "RemoveContainer" containerID="5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.702300 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae"} err="failed to get container status \"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae\": rpc error: code = NotFound desc = could not find container \"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae\": container with ID starting with 5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.702332 5016 scope.go:117] "RemoveContainer" containerID="36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.702586 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b"} err="failed to get container status \"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b\": rpc error: code = NotFound desc = could not find container \"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b\": container with ID starting with 36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.702619 5016 scope.go:117] "RemoveContainer" containerID="9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.703314 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"} err="failed to get container status \"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b\": rpc error: code = NotFound desc = could not find container \"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b\": container with ID starting with 9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b not found: ID does not exist" Dec 
11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.703346 5016 scope.go:117] "RemoveContainer" containerID="887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.703797 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2"} err="failed to get container status \"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2\": rpc error: code = NotFound desc = could not find container \"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2\": container with ID starting with 887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.703824 5016 scope.go:117] "RemoveContainer" containerID="91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.704103 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e"} err="failed to get container status \"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e\": rpc error: code = NotFound desc = could not find container \"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e\": container with ID starting with 91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.704139 5016 scope.go:117] "RemoveContainer" containerID="1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.704498 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3"} err="failed to get container status \"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3\": rpc error: code = NotFound desc = could not find container \"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3\": container with ID starting with 1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.704525 5016 scope.go:117] "RemoveContainer" containerID="cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.704927 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b"} err="failed to get container status \"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b\": rpc error: code = NotFound desc = could not find container \"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b\": container with ID starting with cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.704985 5016 scope.go:117] "RemoveContainer" containerID="627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.705525 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653"} err="failed to get container status 
\"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653\": rpc error: code = NotFound desc = could not find container \"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653\": container with ID starting with 627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.705560 5016 scope.go:117] "RemoveContainer" containerID="6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.706080 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7"} err="failed to get container status \"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7\": rpc error: code = NotFound desc = could not find container \"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7\": container with ID starting with 6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.706116 5016 scope.go:117] "RemoveContainer" containerID="5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.706446 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae"} err="failed to get container status \"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae\": rpc error: code = NotFound desc = could not find container \"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae\": container with ID starting with 5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.706471 5016 scope.go:117] "RemoveContainer" containerID="36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.707016 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b"} err="failed to get container status \"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b\": rpc error: code = NotFound desc = could not find container \"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b\": container with ID starting with 36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.707078 5016 scope.go:117] "RemoveContainer" containerID="9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.707418 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"} err="failed to get container status \"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b\": rpc error: code = NotFound desc = could not find container \"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b\": container with ID starting with 9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.707452 5016 scope.go:117] "RemoveContainer" 
containerID="887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.707787 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2"} err="failed to get container status \"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2\": rpc error: code = NotFound desc = could not find container \"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2\": container with ID starting with 887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.708012 5016 scope.go:117] "RemoveContainer" containerID="91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.708314 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e"} err="failed to get container status \"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e\": rpc error: code = NotFound desc = could not find container \"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e\": container with ID starting with 91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.708342 5016 scope.go:117] "RemoveContainer" containerID="1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.708675 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3"} err="failed to get container status \"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3\": rpc error: code = NotFound desc = could not find container \"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3\": container with ID starting with 1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.708716 5016 scope.go:117] "RemoveContainer" containerID="cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.709123 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b"} err="failed to get container status \"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b\": rpc error: code = NotFound desc = could not find container \"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b\": container with ID starting with cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.709205 5016 scope.go:117] "RemoveContainer" containerID="627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.709520 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653"} err="failed to get container status \"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653\": rpc error: code = NotFound desc = could not find 
container \"627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653\": container with ID starting with 627b09b1674e67ff7ce22f167113f6cb9e9043b1c34a757d064a94be1efc7653 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.709553 5016 scope.go:117] "RemoveContainer" containerID="6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.710025 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7"} err="failed to get container status \"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7\": rpc error: code = NotFound desc = could not find container \"6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7\": container with ID starting with 6b8f08f309fdb9ae111ca3ee5427300e406ccb81fbee752fd28d11e5bea348e7 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.710059 5016 scope.go:117] "RemoveContainer" containerID="5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.710355 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae"} err="failed to get container status \"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae\": rpc error: code = NotFound desc = could not find container \"5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae\": container with ID starting with 5ae3828a18cbcecade785be8329cb678e3c06fdc488e79f3bf4fcc8c481051ae not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.710388 5016 scope.go:117] "RemoveContainer" containerID="36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.710645 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b"} err="failed to get container status \"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b\": rpc error: code = NotFound desc = could not find container \"36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b\": container with ID starting with 36a82f8588f8c8254311b2567df701f5850a325e1a12c60acb20f28eaaaebf8b not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.710668 5016 scope.go:117] "RemoveContainer" containerID="9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.711051 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b"} err="failed to get container status \"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b\": rpc error: code = NotFound desc = could not find container \"9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b\": container with ID starting with 9794b82a7fd6d5d28c7fc9aea10beecc244852eac54a92f2bbb9748a2ec3ed2b not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.711077 5016 scope.go:117] "RemoveContainer" containerID="887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.711351 5016 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2"} err="failed to get container status \"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2\": rpc error: code = NotFound desc = could not find container \"887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2\": container with ID starting with 887947bbe59d340e71eaa7c6b3b5257c1fc727d38a93ea31a717489c1bc81dd2 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.711377 5016 scope.go:117] "RemoveContainer" containerID="91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.711664 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e"} err="failed to get container status \"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e\": rpc error: code = NotFound desc = could not find container \"91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e\": container with ID starting with 91a54d393daaa6188376309c09ff2b9e184fbd8c630219e7874add126226d98e not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.711701 5016 scope.go:117] "RemoveContainer" containerID="1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.712038 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3"} err="failed to get container status \"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3\": rpc error: code = NotFound desc = could not find container \"1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3\": container with ID starting with 1388b286783c0186d0335fde51d7c184b36c8fdf3a021815bf436f20f47d59c3 not found: ID does not exist" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.712076 5016 scope.go:117] "RemoveContainer" containerID="cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b" Dec 11 10:48:31 crc kubenswrapper[5016]: I1211 10:48:31.712348 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b"} err="failed to get container status \"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b\": rpc error: code = NotFound desc = could not find container \"cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b\": container with ID starting with cea4d500132f408c37e6375ba9c4ac4180f0a4c41464017b3850a5e1fba0a63b not found: ID does not exist" Dec 11 10:48:32 crc kubenswrapper[5016]: I1211 10:48:32.486429 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" event={"ID":"efeebdb5-73f2-4a48-ac43-60f86f40b619","Type":"ContainerStarted","Data":"f75e0b71a257eeb5d0aa491275a28607eb5e806382a26dbd36315f98b39bd659"} Dec 11 10:48:32 crc kubenswrapper[5016]: I1211 10:48:32.487071 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" event={"ID":"efeebdb5-73f2-4a48-ac43-60f86f40b619","Type":"ContainerStarted","Data":"4a0b7eaec459b786dd4d340b8ceba2b2aa6dd536601260ccd250e407db19fbe3"} Dec 11 10:48:32 crc kubenswrapper[5016]: I1211 10:48:32.487099 5016 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" event={"ID":"efeebdb5-73f2-4a48-ac43-60f86f40b619","Type":"ContainerStarted","Data":"975c3c01ef4bff19dc6e3aef055ca53846ad771bb266ac78993819a94d7852f7"} Dec 11 10:48:32 crc kubenswrapper[5016]: I1211 10:48:32.487118 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" event={"ID":"efeebdb5-73f2-4a48-ac43-60f86f40b619","Type":"ContainerStarted","Data":"f4c55e8ebd7d3abcf296df6e4ec2442507fbc4aaf1375fe61143abd1211428a8"} Dec 11 10:48:32 crc kubenswrapper[5016]: I1211 10:48:32.491898 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-k9ssc_62530621-fff3-49c0-ba0d-14d7ec144c5f/kube-multus/0.log" Dec 11 10:48:32 crc kubenswrapper[5016]: I1211 10:48:32.491997 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-k9ssc" event={"ID":"62530621-fff3-49c0-ba0d-14d7ec144c5f","Type":"ContainerStarted","Data":"60e774359cd22f559f24262d42df3eebab013c039e38fa5aa0a17ac9f2ddc6d6"} Dec 11 10:48:33 crc kubenswrapper[5016]: I1211 10:48:33.507663 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" event={"ID":"efeebdb5-73f2-4a48-ac43-60f86f40b619","Type":"ContainerStarted","Data":"17ad1d30435e0494fe59683b22b0c3d55502933bfe6dda53b606ca143c7be9f1"} Dec 11 10:48:33 crc kubenswrapper[5016]: I1211 10:48:33.507735 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" event={"ID":"efeebdb5-73f2-4a48-ac43-60f86f40b619","Type":"ContainerStarted","Data":"b83110faaa51641c62e831fb33f46dea988343e80133ef45527a542f8a6ef9f5"} Dec 11 10:48:35 crc kubenswrapper[5016]: I1211 10:48:35.525515 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" event={"ID":"efeebdb5-73f2-4a48-ac43-60f86f40b619","Type":"ContainerStarted","Data":"5854ca2b97f5c6af32ec7880eefa5a22fde398bb7cbdf207f3e2e107861082bc"} Dec 11 10:48:37 crc kubenswrapper[5016]: I1211 10:48:37.548394 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" event={"ID":"efeebdb5-73f2-4a48-ac43-60f86f40b619","Type":"ContainerStarted","Data":"371a224a47612068e3188482bab94b5810630f5ef54b9173867e4dd67a7c6257"} Dec 11 10:48:37 crc kubenswrapper[5016]: I1211 10:48:37.549524 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:37 crc kubenswrapper[5016]: I1211 10:48:37.549562 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:37 crc kubenswrapper[5016]: I1211 10:48:37.592199 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" podStartSLOduration=7.59218104 podStartE2EDuration="7.59218104s" podCreationTimestamp="2025-12-11 10:48:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:48:37.590380246 +0000 UTC m=+834.408939835" watchObservedRunningTime="2025-12-11 10:48:37.59218104 +0000 UTC m=+834.410740609" Dec 11 10:48:37 crc kubenswrapper[5016]: I1211 10:48:37.600259 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:38 crc kubenswrapper[5016]: I1211 10:48:38.558152 
5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:38 crc kubenswrapper[5016]: I1211 10:48:38.593391 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:48:42 crc kubenswrapper[5016]: I1211 10:48:42.933305 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:48:42 crc kubenswrapper[5016]: I1211 10:48:42.934441 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:49:01 crc kubenswrapper[5016]: I1211 10:49:01.071338 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-kr8s2" Dec 11 10:49:01 crc kubenswrapper[5016]: I1211 10:49:01.519305 5016 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","burstable","pod1a090784-1b4b-4c21-b425-9ea90576fc74"] err="unable to destroy cgroup paths for cgroup [kubepods burstable pod1a090784-1b4b-4c21-b425-9ea90576fc74] : Timed out while waiting for systemd to remove kubepods-burstable-pod1a090784_1b4b_4c21_b425_9ea90576fc74.slice" Dec 11 10:49:01 crc kubenswrapper[5016]: E1211 10:49:01.519777 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods burstable pod1a090784-1b4b-4c21-b425-9ea90576fc74] : unable to destroy cgroup paths for cgroup [kubepods burstable pod1a090784-1b4b-4c21-b425-9ea90576fc74] : Timed out while waiting for systemd to remove kubepods-burstable-pod1a090784_1b4b_4c21_b425_9ea90576fc74.slice" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" Dec 11 10:49:01 crc kubenswrapper[5016]: I1211 10:49:01.706803 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7m8vj" Dec 11 10:49:01 crc kubenswrapper[5016]: I1211 10:49:01.739394 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-7m8vj"] Dec 11 10:49:01 crc kubenswrapper[5016]: I1211 10:49:01.746229 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-7m8vj"] Dec 11 10:49:03 crc kubenswrapper[5016]: I1211 10:49:03.483543 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a090784-1b4b-4c21-b425-9ea90576fc74" path="/var/lib/kubelet/pods/1a090784-1b4b-4c21-b425-9ea90576fc74/volumes" Dec 11 10:49:12 crc kubenswrapper[5016]: I1211 10:49:12.932566 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:49:12 crc kubenswrapper[5016]: I1211 10:49:12.933160 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:49:12 crc kubenswrapper[5016]: I1211 10:49:12.933220 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 10:49:12 crc kubenswrapper[5016]: I1211 10:49:12.933906 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5b248f167297cc4b041da14181a2d07d9de5add0be6fdd5562f37c434da09668"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 10:49:12 crc kubenswrapper[5016]: I1211 10:49:12.933977 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://5b248f167297cc4b041da14181a2d07d9de5add0be6fdd5562f37c434da09668" gracePeriod=600 Dec 11 10:49:13 crc kubenswrapper[5016]: I1211 10:49:13.780598 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="5b248f167297cc4b041da14181a2d07d9de5add0be6fdd5562f37c434da09668" exitCode=0 Dec 11 10:49:13 crc kubenswrapper[5016]: I1211 10:49:13.780689 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"5b248f167297cc4b041da14181a2d07d9de5add0be6fdd5562f37c434da09668"} Dec 11 10:49:13 crc kubenswrapper[5016]: I1211 10:49:13.781000 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"512f5c783f58cb8b023d09c68e6c5e485f14c303c2f06e1b8d93d73bedfab5d9"} Dec 11 10:49:13 crc kubenswrapper[5016]: I1211 10:49:13.781025 5016 scope.go:117] "RemoveContainer" containerID="61d3252a2d684020f84ac3017dc378ce04486bdbb0fae848d8fd9fafa07cbdba" 
Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.278665 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc"] Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.280368 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.282981 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.295387 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc"] Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.336271 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/20233dbc-fd39-4958-bde1-4912a7363bf7-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc\" (UID: \"20233dbc-fd39-4958-bde1-4912a7363bf7\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.336758 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5smx2\" (UniqueName: \"kubernetes.io/projected/20233dbc-fd39-4958-bde1-4912a7363bf7-kube-api-access-5smx2\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc\" (UID: \"20233dbc-fd39-4958-bde1-4912a7363bf7\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.337002 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/20233dbc-fd39-4958-bde1-4912a7363bf7-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc\" (UID: \"20233dbc-fd39-4958-bde1-4912a7363bf7\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.438664 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/20233dbc-fd39-4958-bde1-4912a7363bf7-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc\" (UID: \"20233dbc-fd39-4958-bde1-4912a7363bf7\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.439071 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5smx2\" (UniqueName: \"kubernetes.io/projected/20233dbc-fd39-4958-bde1-4912a7363bf7-kube-api-access-5smx2\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc\" (UID: \"20233dbc-fd39-4958-bde1-4912a7363bf7\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.439162 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/20233dbc-fd39-4958-bde1-4912a7363bf7-bundle\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc\" (UID: 
\"20233dbc-fd39-4958-bde1-4912a7363bf7\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.439192 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/20233dbc-fd39-4958-bde1-4912a7363bf7-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc\" (UID: \"20233dbc-fd39-4958-bde1-4912a7363bf7\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.439519 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/20233dbc-fd39-4958-bde1-4912a7363bf7-util\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc\" (UID: \"20233dbc-fd39-4958-bde1-4912a7363bf7\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.461265 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5smx2\" (UniqueName: \"kubernetes.io/projected/20233dbc-fd39-4958-bde1-4912a7363bf7-kube-api-access-5smx2\") pod \"98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc\" (UID: \"20233dbc-fd39-4958-bde1-4912a7363bf7\") " pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.597489 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:15 crc kubenswrapper[5016]: I1211 10:49:15.856558 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc"] Dec 11 10:49:15 crc kubenswrapper[5016]: W1211 10:49:15.865624 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20233dbc_fd39_4958_bde1_4912a7363bf7.slice/crio-96dd3c1c6ec398a966333f7c449c1d397eab36fa9930cb45c39f795273c57b71 WatchSource:0}: Error finding container 96dd3c1c6ec398a966333f7c449c1d397eab36fa9930cb45c39f795273c57b71: Status 404 returned error can't find the container with id 96dd3c1c6ec398a966333f7c449c1d397eab36fa9930cb45c39f795273c57b71 Dec 11 10:49:16 crc kubenswrapper[5016]: I1211 10:49:16.804082 5016 generic.go:334] "Generic (PLEG): container finished" podID="20233dbc-fd39-4958-bde1-4912a7363bf7" containerID="5d82f73a27895e5028428afc915b0b46881e569c62b7e3eae158936b806407f0" exitCode=0 Dec 11 10:49:16 crc kubenswrapper[5016]: I1211 10:49:16.804142 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" event={"ID":"20233dbc-fd39-4958-bde1-4912a7363bf7","Type":"ContainerDied","Data":"5d82f73a27895e5028428afc915b0b46881e569c62b7e3eae158936b806407f0"} Dec 11 10:49:16 crc kubenswrapper[5016]: I1211 10:49:16.804213 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" event={"ID":"20233dbc-fd39-4958-bde1-4912a7363bf7","Type":"ContainerStarted","Data":"96dd3c1c6ec398a966333f7c449c1d397eab36fa9930cb45c39f795273c57b71"} Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.423881 5016 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-sdvm6"] Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.426249 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.438297 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sdvm6"] Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.472243 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c70c967d-c125-452b-95b7-8e590202479e-catalog-content\") pod \"redhat-operators-sdvm6\" (UID: \"c70c967d-c125-452b-95b7-8e590202479e\") " pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.472429 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrl2j\" (UniqueName: \"kubernetes.io/projected/c70c967d-c125-452b-95b7-8e590202479e-kube-api-access-nrl2j\") pod \"redhat-operators-sdvm6\" (UID: \"c70c967d-c125-452b-95b7-8e590202479e\") " pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.472469 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c70c967d-c125-452b-95b7-8e590202479e-utilities\") pod \"redhat-operators-sdvm6\" (UID: \"c70c967d-c125-452b-95b7-8e590202479e\") " pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.574096 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c70c967d-c125-452b-95b7-8e590202479e-catalog-content\") pod \"redhat-operators-sdvm6\" (UID: \"c70c967d-c125-452b-95b7-8e590202479e\") " pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.575238 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c70c967d-c125-452b-95b7-8e590202479e-catalog-content\") pod \"redhat-operators-sdvm6\" (UID: \"c70c967d-c125-452b-95b7-8e590202479e\") " pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.576406 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrl2j\" (UniqueName: \"kubernetes.io/projected/c70c967d-c125-452b-95b7-8e590202479e-kube-api-access-nrl2j\") pod \"redhat-operators-sdvm6\" (UID: \"c70c967d-c125-452b-95b7-8e590202479e\") " pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.576915 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c70c967d-c125-452b-95b7-8e590202479e-utilities\") pod \"redhat-operators-sdvm6\" (UID: \"c70c967d-c125-452b-95b7-8e590202479e\") " pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.577201 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c70c967d-c125-452b-95b7-8e590202479e-utilities\") pod \"redhat-operators-sdvm6\" (UID: \"c70c967d-c125-452b-95b7-8e590202479e\") " 
pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.596849 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrl2j\" (UniqueName: \"kubernetes.io/projected/c70c967d-c125-452b-95b7-8e590202479e-kube-api-access-nrl2j\") pod \"redhat-operators-sdvm6\" (UID: \"c70c967d-c125-452b-95b7-8e590202479e\") " pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.751313 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:17 crc kubenswrapper[5016]: I1211 10:49:17.970470 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sdvm6"] Dec 11 10:49:17 crc kubenswrapper[5016]: W1211 10:49:17.982221 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc70c967d_c125_452b_95b7_8e590202479e.slice/crio-91e142bfda392fb483ff9692935ccef42bad84582c80436d669842ebf58d3fe2 WatchSource:0}: Error finding container 91e142bfda392fb483ff9692935ccef42bad84582c80436d669842ebf58d3fe2: Status 404 returned error can't find the container with id 91e142bfda392fb483ff9692935ccef42bad84582c80436d669842ebf58d3fe2 Dec 11 10:49:18 crc kubenswrapper[5016]: I1211 10:49:18.822917 5016 generic.go:334] "Generic (PLEG): container finished" podID="20233dbc-fd39-4958-bde1-4912a7363bf7" containerID="2e04161718f580361a410e33d256718f0aeb74af6615499b8085ec3199ee9b02" exitCode=0 Dec 11 10:49:18 crc kubenswrapper[5016]: I1211 10:49:18.822956 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" event={"ID":"20233dbc-fd39-4958-bde1-4912a7363bf7","Type":"ContainerDied","Data":"2e04161718f580361a410e33d256718f0aeb74af6615499b8085ec3199ee9b02"} Dec 11 10:49:18 crc kubenswrapper[5016]: I1211 10:49:18.824647 5016 generic.go:334] "Generic (PLEG): container finished" podID="c70c967d-c125-452b-95b7-8e590202479e" containerID="7d1910dbb7665a7ec85bce054326b987b17aeabdaf5efa5bd2e98c13af8f8d7f" exitCode=0 Dec 11 10:49:18 crc kubenswrapper[5016]: I1211 10:49:18.824690 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdvm6" event={"ID":"c70c967d-c125-452b-95b7-8e590202479e","Type":"ContainerDied","Data":"7d1910dbb7665a7ec85bce054326b987b17aeabdaf5efa5bd2e98c13af8f8d7f"} Dec 11 10:49:18 crc kubenswrapper[5016]: I1211 10:49:18.824707 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdvm6" event={"ID":"c70c967d-c125-452b-95b7-8e590202479e","Type":"ContainerStarted","Data":"91e142bfda392fb483ff9692935ccef42bad84582c80436d669842ebf58d3fe2"} Dec 11 10:49:19 crc kubenswrapper[5016]: I1211 10:49:19.836683 5016 generic.go:334] "Generic (PLEG): container finished" podID="20233dbc-fd39-4958-bde1-4912a7363bf7" containerID="75d46820acef73a75c62e30035314542ed9148caebba5f2b7252ca9ef30e93bc" exitCode=0 Dec 11 10:49:19 crc kubenswrapper[5016]: I1211 10:49:19.836878 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" event={"ID":"20233dbc-fd39-4958-bde1-4912a7363bf7","Type":"ContainerDied","Data":"75d46820acef73a75c62e30035314542ed9148caebba5f2b7252ca9ef30e93bc"} Dec 11 10:49:19 crc kubenswrapper[5016]: I1211 10:49:19.841345 5016 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdvm6" event={"ID":"c70c967d-c125-452b-95b7-8e590202479e","Type":"ContainerStarted","Data":"2dfad9d40b51fd7718e0ab4288b830ee9dce4cc22e06e94b14694e662f7e805c"} Dec 11 10:49:20 crc kubenswrapper[5016]: I1211 10:49:20.848896 5016 generic.go:334] "Generic (PLEG): container finished" podID="c70c967d-c125-452b-95b7-8e590202479e" containerID="2dfad9d40b51fd7718e0ab4288b830ee9dce4cc22e06e94b14694e662f7e805c" exitCode=0 Dec 11 10:49:20 crc kubenswrapper[5016]: I1211 10:49:20.848971 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdvm6" event={"ID":"c70c967d-c125-452b-95b7-8e590202479e","Type":"ContainerDied","Data":"2dfad9d40b51fd7718e0ab4288b830ee9dce4cc22e06e94b14694e662f7e805c"} Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.148192 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.238290 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/20233dbc-fd39-4958-bde1-4912a7363bf7-util\") pod \"20233dbc-fd39-4958-bde1-4912a7363bf7\" (UID: \"20233dbc-fd39-4958-bde1-4912a7363bf7\") " Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.238446 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5smx2\" (UniqueName: \"kubernetes.io/projected/20233dbc-fd39-4958-bde1-4912a7363bf7-kube-api-access-5smx2\") pod \"20233dbc-fd39-4958-bde1-4912a7363bf7\" (UID: \"20233dbc-fd39-4958-bde1-4912a7363bf7\") " Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.238518 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/20233dbc-fd39-4958-bde1-4912a7363bf7-bundle\") pod \"20233dbc-fd39-4958-bde1-4912a7363bf7\" (UID: \"20233dbc-fd39-4958-bde1-4912a7363bf7\") " Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.239611 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20233dbc-fd39-4958-bde1-4912a7363bf7-bundle" (OuterVolumeSpecName: "bundle") pod "20233dbc-fd39-4958-bde1-4912a7363bf7" (UID: "20233dbc-fd39-4958-bde1-4912a7363bf7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.245997 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20233dbc-fd39-4958-bde1-4912a7363bf7-kube-api-access-5smx2" (OuterVolumeSpecName: "kube-api-access-5smx2") pod "20233dbc-fd39-4958-bde1-4912a7363bf7" (UID: "20233dbc-fd39-4958-bde1-4912a7363bf7"). InnerVolumeSpecName "kube-api-access-5smx2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.255316 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20233dbc-fd39-4958-bde1-4912a7363bf7-util" (OuterVolumeSpecName: "util") pod "20233dbc-fd39-4958-bde1-4912a7363bf7" (UID: "20233dbc-fd39-4958-bde1-4912a7363bf7"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.340665 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5smx2\" (UniqueName: \"kubernetes.io/projected/20233dbc-fd39-4958-bde1-4912a7363bf7-kube-api-access-5smx2\") on node \"crc\" DevicePath \"\"" Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.340714 5016 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/20233dbc-fd39-4958-bde1-4912a7363bf7-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.340726 5016 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/20233dbc-fd39-4958-bde1-4912a7363bf7-util\") on node \"crc\" DevicePath \"\"" Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.857126 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" event={"ID":"20233dbc-fd39-4958-bde1-4912a7363bf7","Type":"ContainerDied","Data":"96dd3c1c6ec398a966333f7c449c1d397eab36fa9930cb45c39f795273c57b71"} Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.857687 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96dd3c1c6ec398a966333f7c449c1d397eab36fa9930cb45c39f795273c57b71" Dec 11 10:49:21 crc kubenswrapper[5016]: I1211 10:49:21.857262 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc" Dec 11 10:49:22 crc kubenswrapper[5016]: I1211 10:49:22.868434 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdvm6" event={"ID":"c70c967d-c125-452b-95b7-8e590202479e","Type":"ContainerStarted","Data":"af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3"} Dec 11 10:49:22 crc kubenswrapper[5016]: I1211 10:49:22.890108 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-sdvm6" podStartSLOduration=2.174175178 podStartE2EDuration="5.890087165s" podCreationTimestamp="2025-12-11 10:49:17 +0000 UTC" firstStartedPulling="2025-12-11 10:49:18.825580658 +0000 UTC m=+875.644140247" lastFinishedPulling="2025-12-11 10:49:22.541492645 +0000 UTC m=+879.360052234" observedRunningTime="2025-12-11 10:49:22.889916581 +0000 UTC m=+879.708476160" watchObservedRunningTime="2025-12-11 10:49:22.890087165 +0000 UTC m=+879.708646734" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.374042 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-bcg49"] Dec 11 10:49:25 crc kubenswrapper[5016]: E1211 10:49:25.374343 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20233dbc-fd39-4958-bde1-4912a7363bf7" containerName="util" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.374360 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="20233dbc-fd39-4958-bde1-4912a7363bf7" containerName="util" Dec 11 10:49:25 crc kubenswrapper[5016]: E1211 10:49:25.374379 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20233dbc-fd39-4958-bde1-4912a7363bf7" containerName="pull" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.374384 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="20233dbc-fd39-4958-bde1-4912a7363bf7" containerName="pull" Dec 11 10:49:25 crc 
kubenswrapper[5016]: E1211 10:49:25.374393 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20233dbc-fd39-4958-bde1-4912a7363bf7" containerName="extract" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.374400 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="20233dbc-fd39-4958-bde1-4912a7363bf7" containerName="extract" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.374519 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="20233dbc-fd39-4958-bde1-4912a7363bf7" containerName="extract" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.375024 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-6769fb99d-bcg49" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.377435 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.377782 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-szrxc" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.382669 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.387399 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-bcg49"] Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.507191 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp68l\" (UniqueName: \"kubernetes.io/projected/040df3df-7870-45d8-b15c-4f083db8385f-kube-api-access-tp68l\") pod \"nmstate-operator-6769fb99d-bcg49\" (UID: \"040df3df-7870-45d8-b15c-4f083db8385f\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-bcg49" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.608629 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp68l\" (UniqueName: \"kubernetes.io/projected/040df3df-7870-45d8-b15c-4f083db8385f-kube-api-access-tp68l\") pod \"nmstate-operator-6769fb99d-bcg49\" (UID: \"040df3df-7870-45d8-b15c-4f083db8385f\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-bcg49" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.637892 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp68l\" (UniqueName: \"kubernetes.io/projected/040df3df-7870-45d8-b15c-4f083db8385f-kube-api-access-tp68l\") pod \"nmstate-operator-6769fb99d-bcg49\" (UID: \"040df3df-7870-45d8-b15c-4f083db8385f\") " pod="openshift-nmstate/nmstate-operator-6769fb99d-bcg49" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.691328 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-6769fb99d-bcg49" Dec 11 10:49:25 crc kubenswrapper[5016]: I1211 10:49:25.926955 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-6769fb99d-bcg49"] Dec 11 10:49:26 crc kubenswrapper[5016]: I1211 10:49:26.894689 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-6769fb99d-bcg49" event={"ID":"040df3df-7870-45d8-b15c-4f083db8385f","Type":"ContainerStarted","Data":"5c8bc4f540517a0da7ea351944c36c111a73334e5e9fb4f86fd7018bf17c9208"} Dec 11 10:49:27 crc kubenswrapper[5016]: I1211 10:49:27.751678 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:27 crc kubenswrapper[5016]: I1211 10:49:27.752011 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:28 crc kubenswrapper[5016]: I1211 10:49:28.802710 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-sdvm6" podUID="c70c967d-c125-452b-95b7-8e590202479e" containerName="registry-server" probeResult="failure" output=< Dec 11 10:49:28 crc kubenswrapper[5016]: timeout: failed to connect service ":50051" within 1s Dec 11 10:49:28 crc kubenswrapper[5016]: > Dec 11 10:49:33 crc kubenswrapper[5016]: I1211 10:49:33.959023 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-6769fb99d-bcg49" event={"ID":"040df3df-7870-45d8-b15c-4f083db8385f","Type":"ContainerStarted","Data":"dd619b0cd2e5eff9da0b64faac98bcd4d79b980c0204969efb9182f6d3cd8eb0"} Dec 11 10:49:33 crc kubenswrapper[5016]: I1211 10:49:33.982879 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-6769fb99d-bcg49" podStartSLOduration=1.245616432 podStartE2EDuration="8.982860854s" podCreationTimestamp="2025-12-11 10:49:25 +0000 UTC" firstStartedPulling="2025-12-11 10:49:25.93877434 +0000 UTC m=+882.757333919" lastFinishedPulling="2025-12-11 10:49:33.676018752 +0000 UTC m=+890.494578341" observedRunningTime="2025-12-11 10:49:33.979275576 +0000 UTC m=+890.797835165" watchObservedRunningTime="2025-12-11 10:49:33.982860854 +0000 UTC m=+890.801420433" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.312321 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-h52ww"] Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.313249 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-h52ww" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.315169 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-g8qj9" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.332043 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-lz22l"] Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.332929 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.335038 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.341919 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-276mn"] Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.342918 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.350083 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-h52ww"] Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.365473 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-lz22l"] Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.454381 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52"] Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.455397 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.456094 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52"] Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.458014 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.458180 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f541e158-4765-46f6-9a14-f6917fa4b1e3-ovs-socket\") pod \"nmstate-handler-276mn\" (UID: \"f541e158-4765-46f6-9a14-f6917fa4b1e3\") " pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.458229 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4gxg\" (UniqueName: \"kubernetes.io/projected/32c0573d-b135-42ba-bec4-9092104e870c-kube-api-access-b4gxg\") pod \"nmstate-metrics-7f7f7578db-h52ww\" (UID: \"32c0573d-b135-42ba-bec4-9092104e870c\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-h52ww" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.458256 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/671f4389-4dd6-45c7-8eda-d60191819517-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-lz22l\" (UID: \"671f4389-4dd6-45c7-8eda-d60191819517\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.458278 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qnmf\" (UniqueName: \"kubernetes.io/projected/f541e158-4765-46f6-9a14-f6917fa4b1e3-kube-api-access-2qnmf\") pod \"nmstate-handler-276mn\" (UID: \"f541e158-4765-46f6-9a14-f6917fa4b1e3\") " pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.458327 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-bkfjj" Dec 11 10:49:35 crc 
kubenswrapper[5016]: I1211 10:49:35.458331 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.458406 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f541e158-4765-46f6-9a14-f6917fa4b1e3-nmstate-lock\") pod \"nmstate-handler-276mn\" (UID: \"f541e158-4765-46f6-9a14-f6917fa4b1e3\") " pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.458575 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f541e158-4765-46f6-9a14-f6917fa4b1e3-dbus-socket\") pod \"nmstate-handler-276mn\" (UID: \"f541e158-4765-46f6-9a14-f6917fa4b1e3\") " pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.458738 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s74bb\" (UniqueName: \"kubernetes.io/projected/671f4389-4dd6-45c7-8eda-d60191819517-kube-api-access-s74bb\") pod \"nmstate-webhook-f8fb84555-lz22l\" (UID: \"671f4389-4dd6-45c7-8eda-d60191819517\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.560473 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/35c9ce4d-504d-4813-b776-f5d07b9c3d1d-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-6wx52\" (UID: \"35c9ce4d-504d-4813-b776-f5d07b9c3d1d\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.560530 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f541e158-4765-46f6-9a14-f6917fa4b1e3-nmstate-lock\") pod \"nmstate-handler-276mn\" (UID: \"f541e158-4765-46f6-9a14-f6917fa4b1e3\") " pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.560565 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tz6c6\" (UniqueName: \"kubernetes.io/projected/35c9ce4d-504d-4813-b776-f5d07b9c3d1d-kube-api-access-tz6c6\") pod \"nmstate-console-plugin-6ff7998486-6wx52\" (UID: \"35c9ce4d-504d-4813-b776-f5d07b9c3d1d\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.560632 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f541e158-4765-46f6-9a14-f6917fa4b1e3-nmstate-lock\") pod \"nmstate-handler-276mn\" (UID: \"f541e158-4765-46f6-9a14-f6917fa4b1e3\") " pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.560711 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/35c9ce4d-504d-4813-b776-f5d07b9c3d1d-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-6wx52\" (UID: \"35c9ce4d-504d-4813-b776-f5d07b9c3d1d\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.560827 5016 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f541e158-4765-46f6-9a14-f6917fa4b1e3-dbus-socket\") pod \"nmstate-handler-276mn\" (UID: \"f541e158-4765-46f6-9a14-f6917fa4b1e3\") " pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.560968 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s74bb\" (UniqueName: \"kubernetes.io/projected/671f4389-4dd6-45c7-8eda-d60191819517-kube-api-access-s74bb\") pod \"nmstate-webhook-f8fb84555-lz22l\" (UID: \"671f4389-4dd6-45c7-8eda-d60191819517\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.561026 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f541e158-4765-46f6-9a14-f6917fa4b1e3-ovs-socket\") pod \"nmstate-handler-276mn\" (UID: \"f541e158-4765-46f6-9a14-f6917fa4b1e3\") " pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.561060 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4gxg\" (UniqueName: \"kubernetes.io/projected/32c0573d-b135-42ba-bec4-9092104e870c-kube-api-access-b4gxg\") pod \"nmstate-metrics-7f7f7578db-h52ww\" (UID: \"32c0573d-b135-42ba-bec4-9092104e870c\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-h52ww" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.561066 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f541e158-4765-46f6-9a14-f6917fa4b1e3-ovs-socket\") pod \"nmstate-handler-276mn\" (UID: \"f541e158-4765-46f6-9a14-f6917fa4b1e3\") " pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.561092 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/671f4389-4dd6-45c7-8eda-d60191819517-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-lz22l\" (UID: \"671f4389-4dd6-45c7-8eda-d60191819517\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.561118 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qnmf\" (UniqueName: \"kubernetes.io/projected/f541e158-4765-46f6-9a14-f6917fa4b1e3-kube-api-access-2qnmf\") pod \"nmstate-handler-276mn\" (UID: \"f541e158-4765-46f6-9a14-f6917fa4b1e3\") " pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.561133 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f541e158-4765-46f6-9a14-f6917fa4b1e3-dbus-socket\") pod \"nmstate-handler-276mn\" (UID: \"f541e158-4765-46f6-9a14-f6917fa4b1e3\") " pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.580916 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/671f4389-4dd6-45c7-8eda-d60191819517-tls-key-pair\") pod \"nmstate-webhook-f8fb84555-lz22l\" (UID: \"671f4389-4dd6-45c7-8eda-d60191819517\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.586745 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-s74bb\" (UniqueName: \"kubernetes.io/projected/671f4389-4dd6-45c7-8eda-d60191819517-kube-api-access-s74bb\") pod \"nmstate-webhook-f8fb84555-lz22l\" (UID: \"671f4389-4dd6-45c7-8eda-d60191819517\") " pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.586785 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qnmf\" (UniqueName: \"kubernetes.io/projected/f541e158-4765-46f6-9a14-f6917fa4b1e3-kube-api-access-2qnmf\") pod \"nmstate-handler-276mn\" (UID: \"f541e158-4765-46f6-9a14-f6917fa4b1e3\") " pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.586856 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4gxg\" (UniqueName: \"kubernetes.io/projected/32c0573d-b135-42ba-bec4-9092104e870c-kube-api-access-b4gxg\") pod \"nmstate-metrics-7f7f7578db-h52ww\" (UID: \"32c0573d-b135-42ba-bec4-9092104e870c\") " pod="openshift-nmstate/nmstate-metrics-7f7f7578db-h52ww" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.628821 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-h52ww" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.645784 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.660688 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.662182 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/35c9ce4d-504d-4813-b776-f5d07b9c3d1d-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-6wx52\" (UID: \"35c9ce4d-504d-4813-b776-f5d07b9c3d1d\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.662229 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tz6c6\" (UniqueName: \"kubernetes.io/projected/35c9ce4d-504d-4813-b776-f5d07b9c3d1d-kube-api-access-tz6c6\") pod \"nmstate-console-plugin-6ff7998486-6wx52\" (UID: \"35c9ce4d-504d-4813-b776-f5d07b9c3d1d\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.662254 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/35c9ce4d-504d-4813-b776-f5d07b9c3d1d-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-6wx52\" (UID: \"35c9ce4d-504d-4813-b776-f5d07b9c3d1d\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.663308 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/35c9ce4d-504d-4813-b776-f5d07b9c3d1d-nginx-conf\") pod \"nmstate-console-plugin-6ff7998486-6wx52\" (UID: \"35c9ce4d-504d-4813-b776-f5d07b9c3d1d\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.668589 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/35c9ce4d-504d-4813-b776-f5d07b9c3d1d-plugin-serving-cert\") pod \"nmstate-console-plugin-6ff7998486-6wx52\" (UID: \"35c9ce4d-504d-4813-b776-f5d07b9c3d1d\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.669988 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6b95dc7c9c-tdz4n"] Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.670776 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.683555 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tz6c6\" (UniqueName: \"kubernetes.io/projected/35c9ce4d-504d-4813-b776-f5d07b9c3d1d-kube-api-access-tz6c6\") pod \"nmstate-console-plugin-6ff7998486-6wx52\" (UID: \"35c9ce4d-504d-4813-b776-f5d07b9c3d1d\") " pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.684011 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6b95dc7c9c-tdz4n"] Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.763692 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/cb6df42a-7e70-456c-9d3e-49e2eea84266-oauth-serving-cert\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.763748 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/cb6df42a-7e70-456c-9d3e-49e2eea84266-console-oauth-config\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.763794 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/cb6df42a-7e70-456c-9d3e-49e2eea84266-console-serving-cert\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.763834 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cb6df42a-7e70-456c-9d3e-49e2eea84266-trusted-ca-bundle\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.763879 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/cb6df42a-7e70-456c-9d3e-49e2eea84266-service-ca\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.763909 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/cb6df42a-7e70-456c-9d3e-49e2eea84266-console-config\") pod 
\"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.763964 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzkfp\" (UniqueName: \"kubernetes.io/projected/cb6df42a-7e70-456c-9d3e-49e2eea84266-kube-api-access-gzkfp\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.773066 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.865515 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/cb6df42a-7e70-456c-9d3e-49e2eea84266-service-ca\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.866003 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/cb6df42a-7e70-456c-9d3e-49e2eea84266-console-config\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.866036 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzkfp\" (UniqueName: \"kubernetes.io/projected/cb6df42a-7e70-456c-9d3e-49e2eea84266-kube-api-access-gzkfp\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.866074 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/cb6df42a-7e70-456c-9d3e-49e2eea84266-oauth-serving-cert\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.866101 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/cb6df42a-7e70-456c-9d3e-49e2eea84266-console-oauth-config\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.866132 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/cb6df42a-7e70-456c-9d3e-49e2eea84266-console-serving-cert\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.866168 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cb6df42a-7e70-456c-9d3e-49e2eea84266-trusted-ca-bundle\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: 
I1211 10:49:35.867681 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/cb6df42a-7e70-456c-9d3e-49e2eea84266-console-config\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.869286 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/cb6df42a-7e70-456c-9d3e-49e2eea84266-service-ca\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.870910 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/cb6df42a-7e70-456c-9d3e-49e2eea84266-oauth-serving-cert\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.873354 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/cb6df42a-7e70-456c-9d3e-49e2eea84266-console-serving-cert\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.874048 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cb6df42a-7e70-456c-9d3e-49e2eea84266-trusted-ca-bundle\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.875626 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/cb6df42a-7e70-456c-9d3e-49e2eea84266-console-oauth-config\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.885168 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzkfp\" (UniqueName: \"kubernetes.io/projected/cb6df42a-7e70-456c-9d3e-49e2eea84266-kube-api-access-gzkfp\") pod \"console-6b95dc7c9c-tdz4n\" (UID: \"cb6df42a-7e70-456c-9d3e-49e2eea84266\") " pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.946390 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-f8fb84555-lz22l"] Dec 11 10:49:35 crc kubenswrapper[5016]: W1211 10:49:35.961152 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod671f4389_4dd6_45c7_8eda_d60191819517.slice/crio-1563326740beccb3c95e94959d8ab4638149ff6b7476380306e8e847af5da289 WatchSource:0}: Error finding container 1563326740beccb3c95e94959d8ab4638149ff6b7476380306e8e847af5da289: Status 404 returned error can't find the container with id 1563326740beccb3c95e94959d8ab4638149ff6b7476380306e8e847af5da289 Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.973027 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-276mn" 
event={"ID":"f541e158-4765-46f6-9a14-f6917fa4b1e3","Type":"ContainerStarted","Data":"7d46251647c880508f0765b57113590420550a1a4a997861aa18c919c9641826"} Dec 11 10:49:35 crc kubenswrapper[5016]: I1211 10:49:35.974425 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" event={"ID":"671f4389-4dd6-45c7-8eda-d60191819517","Type":"ContainerStarted","Data":"1563326740beccb3c95e94959d8ab4638149ff6b7476380306e8e847af5da289"} Dec 11 10:49:36 crc kubenswrapper[5016]: I1211 10:49:36.005301 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f7f7578db-h52ww"] Dec 11 10:49:36 crc kubenswrapper[5016]: W1211 10:49:36.019242 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod32c0573d_b135_42ba_bec4_9092104e870c.slice/crio-6730e840b2867b035609135a665765aba94dd545abb7d78519cbfb659f4752f6 WatchSource:0}: Error finding container 6730e840b2867b035609135a665765aba94dd545abb7d78519cbfb659f4752f6: Status 404 returned error can't find the container with id 6730e840b2867b035609135a665765aba94dd545abb7d78519cbfb659f4752f6 Dec 11 10:49:36 crc kubenswrapper[5016]: I1211 10:49:36.046677 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:36 crc kubenswrapper[5016]: I1211 10:49:36.064749 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52"] Dec 11 10:49:36 crc kubenswrapper[5016]: W1211 10:49:36.075120 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35c9ce4d_504d_4813_b776_f5d07b9c3d1d.slice/crio-8411566280a710c4a18fd2d1bbae7889a3d242a0db43975095d2d0f536ef7b93 WatchSource:0}: Error finding container 8411566280a710c4a18fd2d1bbae7889a3d242a0db43975095d2d0f536ef7b93: Status 404 returned error can't find the container with id 8411566280a710c4a18fd2d1bbae7889a3d242a0db43975095d2d0f536ef7b93 Dec 11 10:49:36 crc kubenswrapper[5016]: I1211 10:49:36.528862 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6b95dc7c9c-tdz4n"] Dec 11 10:49:36 crc kubenswrapper[5016]: I1211 10:49:36.981619 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-h52ww" event={"ID":"32c0573d-b135-42ba-bec4-9092104e870c","Type":"ContainerStarted","Data":"6730e840b2867b035609135a665765aba94dd545abb7d78519cbfb659f4752f6"} Dec 11 10:49:36 crc kubenswrapper[5016]: I1211 10:49:36.984758 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6b95dc7c9c-tdz4n" event={"ID":"cb6df42a-7e70-456c-9d3e-49e2eea84266","Type":"ContainerStarted","Data":"4e9b82361c0028a0191c70fe708855d4802764e5b167af761bbcab08bd8a57c9"} Dec 11 10:49:36 crc kubenswrapper[5016]: I1211 10:49:36.984785 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6b95dc7c9c-tdz4n" event={"ID":"cb6df42a-7e70-456c-9d3e-49e2eea84266","Type":"ContainerStarted","Data":"49369d9b6c8b8d3053c294ab7033924ea5f29c4e8406c50715a07b48898e1f46"} Dec 11 10:49:36 crc kubenswrapper[5016]: I1211 10:49:36.987254 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" 
event={"ID":"35c9ce4d-504d-4813-b776-f5d07b9c3d1d","Type":"ContainerStarted","Data":"8411566280a710c4a18fd2d1bbae7889a3d242a0db43975095d2d0f536ef7b93"} Dec 11 10:49:37 crc kubenswrapper[5016]: I1211 10:49:37.007841 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6b95dc7c9c-tdz4n" podStartSLOduration=2.007824116 podStartE2EDuration="2.007824116s" podCreationTimestamp="2025-12-11 10:49:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:49:37.004172366 +0000 UTC m=+893.822731965" watchObservedRunningTime="2025-12-11 10:49:37.007824116 +0000 UTC m=+893.826383695" Dec 11 10:49:37 crc kubenswrapper[5016]: I1211 10:49:37.807434 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:37 crc kubenswrapper[5016]: I1211 10:49:37.848888 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:38 crc kubenswrapper[5016]: I1211 10:49:38.041053 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sdvm6"] Dec 11 10:49:38 crc kubenswrapper[5016]: I1211 10:49:38.998621 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-sdvm6" podUID="c70c967d-c125-452b-95b7-8e590202479e" containerName="registry-server" containerID="cri-o://af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3" gracePeriod=2 Dec 11 10:49:39 crc kubenswrapper[5016]: I1211 10:49:39.380144 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:39 crc kubenswrapper[5016]: I1211 10:49:39.511876 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c70c967d-c125-452b-95b7-8e590202479e-utilities\") pod \"c70c967d-c125-452b-95b7-8e590202479e\" (UID: \"c70c967d-c125-452b-95b7-8e590202479e\") " Dec 11 10:49:39 crc kubenswrapper[5016]: I1211 10:49:39.512014 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrl2j\" (UniqueName: \"kubernetes.io/projected/c70c967d-c125-452b-95b7-8e590202479e-kube-api-access-nrl2j\") pod \"c70c967d-c125-452b-95b7-8e590202479e\" (UID: \"c70c967d-c125-452b-95b7-8e590202479e\") " Dec 11 10:49:39 crc kubenswrapper[5016]: I1211 10:49:39.512057 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c70c967d-c125-452b-95b7-8e590202479e-catalog-content\") pod \"c70c967d-c125-452b-95b7-8e590202479e\" (UID: \"c70c967d-c125-452b-95b7-8e590202479e\") " Dec 11 10:49:39 crc kubenswrapper[5016]: I1211 10:49:39.513203 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c70c967d-c125-452b-95b7-8e590202479e-utilities" (OuterVolumeSpecName: "utilities") pod "c70c967d-c125-452b-95b7-8e590202479e" (UID: "c70c967d-c125-452b-95b7-8e590202479e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:49:39 crc kubenswrapper[5016]: I1211 10:49:39.513745 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c70c967d-c125-452b-95b7-8e590202479e-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:49:39 crc kubenswrapper[5016]: I1211 10:49:39.518300 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c70c967d-c125-452b-95b7-8e590202479e-kube-api-access-nrl2j" (OuterVolumeSpecName: "kube-api-access-nrl2j") pod "c70c967d-c125-452b-95b7-8e590202479e" (UID: "c70c967d-c125-452b-95b7-8e590202479e"). InnerVolumeSpecName "kube-api-access-nrl2j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:49:39 crc kubenswrapper[5016]: I1211 10:49:39.615023 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrl2j\" (UniqueName: \"kubernetes.io/projected/c70c967d-c125-452b-95b7-8e590202479e-kube-api-access-nrl2j\") on node \"crc\" DevicePath \"\"" Dec 11 10:49:39 crc kubenswrapper[5016]: I1211 10:49:39.626133 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c70c967d-c125-452b-95b7-8e590202479e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c70c967d-c125-452b-95b7-8e590202479e" (UID: "c70c967d-c125-452b-95b7-8e590202479e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:49:39 crc kubenswrapper[5016]: I1211 10:49:39.716381 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c70c967d-c125-452b-95b7-8e590202479e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.019738 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" event={"ID":"35c9ce4d-504d-4813-b776-f5d07b9c3d1d","Type":"ContainerStarted","Data":"7a168ef706efc1ffd900e25bff868b2b41f934f756caba6cf21e71cc359c0366"} Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.023289 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-276mn" event={"ID":"f541e158-4765-46f6-9a14-f6917fa4b1e3","Type":"ContainerStarted","Data":"4405808bbfcd59764a01dd02beffb8f05a200a24965cb8bdb2339bd728bc3585"} Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.023450 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.024869 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-h52ww" event={"ID":"32c0573d-b135-42ba-bec4-9092104e870c","Type":"ContainerStarted","Data":"c1020ab3ff029bcba20c3d7eefd35cef292a6fe2b492a302f93a738f34a06e57"} Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.026304 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" event={"ID":"671f4389-4dd6-45c7-8eda-d60191819517","Type":"ContainerStarted","Data":"2093f27a057903a1b0397601282f8e8a4aa5587b2481c759d72445656142b448"} Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.026837 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.029120 5016 generic.go:334] "Generic (PLEG): container 
finished" podID="c70c967d-c125-452b-95b7-8e590202479e" containerID="af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3" exitCode=0 Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.029161 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdvm6" event={"ID":"c70c967d-c125-452b-95b7-8e590202479e","Type":"ContainerDied","Data":"af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3"} Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.029185 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sdvm6" event={"ID":"c70c967d-c125-452b-95b7-8e590202479e","Type":"ContainerDied","Data":"91e142bfda392fb483ff9692935ccef42bad84582c80436d669842ebf58d3fe2"} Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.029207 5016 scope.go:117] "RemoveContainer" containerID="af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.029339 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sdvm6" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.049071 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-6ff7998486-6wx52" podStartSLOduration=2.059881527 podStartE2EDuration="5.049053038s" podCreationTimestamp="2025-12-11 10:49:35 +0000 UTC" firstStartedPulling="2025-12-11 10:49:36.0771771 +0000 UTC m=+892.895736679" lastFinishedPulling="2025-12-11 10:49:39.066348611 +0000 UTC m=+895.884908190" observedRunningTime="2025-12-11 10:49:40.042424414 +0000 UTC m=+896.860983993" watchObservedRunningTime="2025-12-11 10:49:40.049053038 +0000 UTC m=+896.867612617" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.068628 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" podStartSLOduration=1.957534129 podStartE2EDuration="5.068605709s" podCreationTimestamp="2025-12-11 10:49:35 +0000 UTC" firstStartedPulling="2025-12-11 10:49:35.963250557 +0000 UTC m=+892.781810136" lastFinishedPulling="2025-12-11 10:49:39.074322137 +0000 UTC m=+895.892881716" observedRunningTime="2025-12-11 10:49:40.065510803 +0000 UTC m=+896.884070382" watchObservedRunningTime="2025-12-11 10:49:40.068605709 +0000 UTC m=+896.887165288" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.081270 5016 scope.go:117] "RemoveContainer" containerID="2dfad9d40b51fd7718e0ab4288b830ee9dce4cc22e06e94b14694e662f7e805c" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.084558 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-276mn" podStartSLOduration=1.703639669 podStartE2EDuration="5.084538541s" podCreationTimestamp="2025-12-11 10:49:35 +0000 UTC" firstStartedPulling="2025-12-11 10:49:35.705720898 +0000 UTC m=+892.524280477" lastFinishedPulling="2025-12-11 10:49:39.08661977 +0000 UTC m=+895.905179349" observedRunningTime="2025-12-11 10:49:40.083852934 +0000 UTC m=+896.902412543" watchObservedRunningTime="2025-12-11 10:49:40.084538541 +0000 UTC m=+896.903098140" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.106576 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sdvm6"] Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.111412 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-marketplace/redhat-operators-sdvm6"] Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.121064 5016 scope.go:117] "RemoveContainer" containerID="7d1910dbb7665a7ec85bce054326b987b17aeabdaf5efa5bd2e98c13af8f8d7f" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.137534 5016 scope.go:117] "RemoveContainer" containerID="af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3" Dec 11 10:49:40 crc kubenswrapper[5016]: E1211 10:49:40.138099 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3\": container with ID starting with af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3 not found: ID does not exist" containerID="af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.138138 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3"} err="failed to get container status \"af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3\": rpc error: code = NotFound desc = could not find container \"af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3\": container with ID starting with af04b0db0517522619b5049919bd42ec24613d03be12defa13f94440161db0d3 not found: ID does not exist" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.138167 5016 scope.go:117] "RemoveContainer" containerID="2dfad9d40b51fd7718e0ab4288b830ee9dce4cc22e06e94b14694e662f7e805c" Dec 11 10:49:40 crc kubenswrapper[5016]: E1211 10:49:40.138433 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2dfad9d40b51fd7718e0ab4288b830ee9dce4cc22e06e94b14694e662f7e805c\": container with ID starting with 2dfad9d40b51fd7718e0ab4288b830ee9dce4cc22e06e94b14694e662f7e805c not found: ID does not exist" containerID="2dfad9d40b51fd7718e0ab4288b830ee9dce4cc22e06e94b14694e662f7e805c" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.138560 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2dfad9d40b51fd7718e0ab4288b830ee9dce4cc22e06e94b14694e662f7e805c"} err="failed to get container status \"2dfad9d40b51fd7718e0ab4288b830ee9dce4cc22e06e94b14694e662f7e805c\": rpc error: code = NotFound desc = could not find container \"2dfad9d40b51fd7718e0ab4288b830ee9dce4cc22e06e94b14694e662f7e805c\": container with ID starting with 2dfad9d40b51fd7718e0ab4288b830ee9dce4cc22e06e94b14694e662f7e805c not found: ID does not exist" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.138658 5016 scope.go:117] "RemoveContainer" containerID="7d1910dbb7665a7ec85bce054326b987b17aeabdaf5efa5bd2e98c13af8f8d7f" Dec 11 10:49:40 crc kubenswrapper[5016]: E1211 10:49:40.139057 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d1910dbb7665a7ec85bce054326b987b17aeabdaf5efa5bd2e98c13af8f8d7f\": container with ID starting with 7d1910dbb7665a7ec85bce054326b987b17aeabdaf5efa5bd2e98c13af8f8d7f not found: ID does not exist" containerID="7d1910dbb7665a7ec85bce054326b987b17aeabdaf5efa5bd2e98c13af8f8d7f" Dec 11 10:49:40 crc kubenswrapper[5016]: I1211 10:49:40.139156 5016 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7d1910dbb7665a7ec85bce054326b987b17aeabdaf5efa5bd2e98c13af8f8d7f"} err="failed to get container status \"7d1910dbb7665a7ec85bce054326b987b17aeabdaf5efa5bd2e98c13af8f8d7f\": rpc error: code = NotFound desc = could not find container \"7d1910dbb7665a7ec85bce054326b987b17aeabdaf5efa5bd2e98c13af8f8d7f\": container with ID starting with 7d1910dbb7665a7ec85bce054326b987b17aeabdaf5efa5bd2e98c13af8f8d7f not found: ID does not exist" Dec 11 10:49:41 crc kubenswrapper[5016]: I1211 10:49:41.493047 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c70c967d-c125-452b-95b7-8e590202479e" path="/var/lib/kubelet/pods/c70c967d-c125-452b-95b7-8e590202479e/volumes" Dec 11 10:49:42 crc kubenswrapper[5016]: I1211 10:49:42.043891 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-h52ww" event={"ID":"32c0573d-b135-42ba-bec4-9092104e870c","Type":"ContainerStarted","Data":"25d564248a62aefbedec1797b723e703ff87daa0d06230ea94f91b0c2dc99daf"} Dec 11 10:49:42 crc kubenswrapper[5016]: I1211 10:49:42.059284 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f7f7578db-h52ww" podStartSLOduration=1.609476921 podStartE2EDuration="7.059259604s" podCreationTimestamp="2025-12-11 10:49:35 +0000 UTC" firstStartedPulling="2025-12-11 10:49:36.02438021 +0000 UTC m=+892.842939789" lastFinishedPulling="2025-12-11 10:49:41.474162893 +0000 UTC m=+898.292722472" observedRunningTime="2025-12-11 10:49:42.057804398 +0000 UTC m=+898.876364007" watchObservedRunningTime="2025-12-11 10:49:42.059259604 +0000 UTC m=+898.877819183" Dec 11 10:49:45 crc kubenswrapper[5016]: I1211 10:49:45.687634 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-276mn" Dec 11 10:49:46 crc kubenswrapper[5016]: I1211 10:49:46.047743 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:46 crc kubenswrapper[5016]: I1211 10:49:46.047807 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:46 crc kubenswrapper[5016]: I1211 10:49:46.059478 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:46 crc kubenswrapper[5016]: I1211 10:49:46.073820 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6b95dc7c9c-tdz4n" Dec 11 10:49:46 crc kubenswrapper[5016]: I1211 10:49:46.145628 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-jpxgn"] Dec 11 10:49:55 crc kubenswrapper[5016]: I1211 10:49:55.655607 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-f8fb84555-lz22l" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.189799 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-jpxgn" podUID="fa3166f9-577e-4994-9290-7ced66d69dcc" containerName="console" containerID="cri-o://c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c" gracePeriod=15 Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.576254 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-jpxgn_fa3166f9-577e-4994-9290-7ced66d69dcc/console/0.log" Dec 11 10:50:11 
crc kubenswrapper[5016]: I1211 10:50:11.576616 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.720888 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-console-config\") pod \"fa3166f9-577e-4994-9290-7ced66d69dcc\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.721027 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-oauth-serving-cert\") pod \"fa3166f9-577e-4994-9290-7ced66d69dcc\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.721062 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-service-ca\") pod \"fa3166f9-577e-4994-9290-7ced66d69dcc\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.721090 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg45t\" (UniqueName: \"kubernetes.io/projected/fa3166f9-577e-4994-9290-7ced66d69dcc-kube-api-access-qg45t\") pod \"fa3166f9-577e-4994-9290-7ced66d69dcc\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.721157 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fa3166f9-577e-4994-9290-7ced66d69dcc-console-serving-cert\") pod \"fa3166f9-577e-4994-9290-7ced66d69dcc\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.721209 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fa3166f9-577e-4994-9290-7ced66d69dcc-console-oauth-config\") pod \"fa3166f9-577e-4994-9290-7ced66d69dcc\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.721796 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "fa3166f9-577e-4994-9290-7ced66d69dcc" (UID: "fa3166f9-577e-4994-9290-7ced66d69dcc"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.721806 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-console-config" (OuterVolumeSpecName: "console-config") pod "fa3166f9-577e-4994-9290-7ced66d69dcc" (UID: "fa3166f9-577e-4994-9290-7ced66d69dcc"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.721967 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-service-ca" (OuterVolumeSpecName: "service-ca") pod "fa3166f9-577e-4994-9290-7ced66d69dcc" (UID: "fa3166f9-577e-4994-9290-7ced66d69dcc"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.722110 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-trusted-ca-bundle\") pod \"fa3166f9-577e-4994-9290-7ced66d69dcc\" (UID: \"fa3166f9-577e-4994-9290-7ced66d69dcc\") " Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.722360 5016 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-console-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.722379 5016 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.722388 5016 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-service-ca\") on node \"crc\" DevicePath \"\"" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.722563 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "fa3166f9-577e-4994-9290-7ced66d69dcc" (UID: "fa3166f9-577e-4994-9290-7ced66d69dcc"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.727391 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa3166f9-577e-4994-9290-7ced66d69dcc-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "fa3166f9-577e-4994-9290-7ced66d69dcc" (UID: "fa3166f9-577e-4994-9290-7ced66d69dcc"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.727462 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa3166f9-577e-4994-9290-7ced66d69dcc-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "fa3166f9-577e-4994-9290-7ced66d69dcc" (UID: "fa3166f9-577e-4994-9290-7ced66d69dcc"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.745063 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa3166f9-577e-4994-9290-7ced66d69dcc-kube-api-access-qg45t" (OuterVolumeSpecName: "kube-api-access-qg45t") pod "fa3166f9-577e-4994-9290-7ced66d69dcc" (UID: "fa3166f9-577e-4994-9290-7ced66d69dcc"). InnerVolumeSpecName "kube-api-access-qg45t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.823272 5016 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fa3166f9-577e-4994-9290-7ced66d69dcc-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.823311 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg45t\" (UniqueName: \"kubernetes.io/projected/fa3166f9-577e-4994-9290-7ced66d69dcc-kube-api-access-qg45t\") on node \"crc\" DevicePath \"\"" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.823322 5016 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fa3166f9-577e-4994-9290-7ced66d69dcc-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 11 10:50:11 crc kubenswrapper[5016]: I1211 10:50:11.823331 5016 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fa3166f9-577e-4994-9290-7ced66d69dcc-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.251743 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-jpxgn_fa3166f9-577e-4994-9290-7ced66d69dcc/console/0.log" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.251791 5016 generic.go:334] "Generic (PLEG): container finished" podID="fa3166f9-577e-4994-9290-7ced66d69dcc" containerID="c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c" exitCode=2 Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.251820 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jpxgn" event={"ID":"fa3166f9-577e-4994-9290-7ced66d69dcc","Type":"ContainerDied","Data":"c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c"} Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.251844 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jpxgn" event={"ID":"fa3166f9-577e-4994-9290-7ced66d69dcc","Type":"ContainerDied","Data":"9f6adef18ccb4454b2866fdb14291125e8fff76c0bd7e44ba9aeeb8058d198e0"} Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.251852 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-jpxgn" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.251862 5016 scope.go:117] "RemoveContainer" containerID="c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.282628 5016 scope.go:117] "RemoveContainer" containerID="c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c" Dec 11 10:50:12 crc kubenswrapper[5016]: E1211 10:50:12.283353 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c\": container with ID starting with c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c not found: ID does not exist" containerID="c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.283408 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c"} err="failed to get container status \"c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c\": rpc error: code = NotFound desc = could not find container \"c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c\": container with ID starting with c842f8f2f71e190bd49b9ff52feb171fceb19353b2d911efda78efaa44b3f42c not found: ID does not exist" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.292372 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-jpxgn"] Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.299186 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-jpxgn"] Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.743712 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld"] Dec 11 10:50:12 crc kubenswrapper[5016]: E1211 10:50:12.744302 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c70c967d-c125-452b-95b7-8e590202479e" containerName="extract-content" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.744319 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="c70c967d-c125-452b-95b7-8e590202479e" containerName="extract-content" Dec 11 10:50:12 crc kubenswrapper[5016]: E1211 10:50:12.744333 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c70c967d-c125-452b-95b7-8e590202479e" containerName="extract-utilities" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.744340 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="c70c967d-c125-452b-95b7-8e590202479e" containerName="extract-utilities" Dec 11 10:50:12 crc kubenswrapper[5016]: E1211 10:50:12.744352 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa3166f9-577e-4994-9290-7ced66d69dcc" containerName="console" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.744358 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa3166f9-577e-4994-9290-7ced66d69dcc" containerName="console" Dec 11 10:50:12 crc kubenswrapper[5016]: E1211 10:50:12.744367 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c70c967d-c125-452b-95b7-8e590202479e" containerName="registry-server" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.744375 5016 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c70c967d-c125-452b-95b7-8e590202479e" containerName="registry-server" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.744488 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="c70c967d-c125-452b-95b7-8e590202479e" containerName="registry-server" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.744502 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa3166f9-577e-4994-9290-7ced66d69dcc" containerName="console" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.745605 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.747801 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.751158 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld"] Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.834590 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/148dba01-3eb3-49e5-8662-3824d9933a4c-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld\" (UID: \"148dba01-3eb3-49e5-8662-3824d9933a4c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.834663 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/148dba01-3eb3-49e5-8662-3824d9933a4c-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld\" (UID: \"148dba01-3eb3-49e5-8662-3824d9933a4c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.834902 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jz5b\" (UniqueName: \"kubernetes.io/projected/148dba01-3eb3-49e5-8662-3824d9933a4c-kube-api-access-7jz5b\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld\" (UID: \"148dba01-3eb3-49e5-8662-3824d9933a4c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.936072 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/148dba01-3eb3-49e5-8662-3824d9933a4c-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld\" (UID: \"148dba01-3eb3-49e5-8662-3824d9933a4c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.936186 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jz5b\" (UniqueName: \"kubernetes.io/projected/148dba01-3eb3-49e5-8662-3824d9933a4c-kube-api-access-7jz5b\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld\" (UID: \"148dba01-3eb3-49e5-8662-3824d9933a4c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.936253 5016 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/148dba01-3eb3-49e5-8662-3824d9933a4c-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld\" (UID: \"148dba01-3eb3-49e5-8662-3824d9933a4c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.936816 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/148dba01-3eb3-49e5-8662-3824d9933a4c-util\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld\" (UID: \"148dba01-3eb3-49e5-8662-3824d9933a4c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.938418 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/148dba01-3eb3-49e5-8662-3824d9933a4c-bundle\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld\" (UID: \"148dba01-3eb3-49e5-8662-3824d9933a4c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:12 crc kubenswrapper[5016]: I1211 10:50:12.963274 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jz5b\" (UniqueName: \"kubernetes.io/projected/148dba01-3eb3-49e5-8662-3824d9933a4c-kube-api-access-7jz5b\") pod \"5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld\" (UID: \"148dba01-3eb3-49e5-8662-3824d9933a4c\") " pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:13 crc kubenswrapper[5016]: I1211 10:50:13.059795 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:13 crc kubenswrapper[5016]: I1211 10:50:13.269347 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld"] Dec 11 10:50:13 crc kubenswrapper[5016]: I1211 10:50:13.484025 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa3166f9-577e-4994-9290-7ced66d69dcc" path="/var/lib/kubelet/pods/fa3166f9-577e-4994-9290-7ced66d69dcc/volumes" Dec 11 10:50:14 crc kubenswrapper[5016]: I1211 10:50:14.271926 5016 generic.go:334] "Generic (PLEG): container finished" podID="148dba01-3eb3-49e5-8662-3824d9933a4c" containerID="45d2a927b094cbeb8da8d98c0ddc6114be9d22643f74d02e9a28ac1f1c9518f1" exitCode=0 Dec 11 10:50:14 crc kubenswrapper[5016]: I1211 10:50:14.272030 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" event={"ID":"148dba01-3eb3-49e5-8662-3824d9933a4c","Type":"ContainerDied","Data":"45d2a927b094cbeb8da8d98c0ddc6114be9d22643f74d02e9a28ac1f1c9518f1"} Dec 11 10:50:14 crc kubenswrapper[5016]: I1211 10:50:14.272118 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" event={"ID":"148dba01-3eb3-49e5-8662-3824d9933a4c","Type":"ContainerStarted","Data":"452d9bf53991ed44cbdf01daeb93665548effa65d5206c7a82cee7121d402d94"} Dec 11 10:50:17 crc kubenswrapper[5016]: I1211 10:50:17.298506 5016 generic.go:334] "Generic (PLEG): container finished" podID="148dba01-3eb3-49e5-8662-3824d9933a4c" containerID="b19326759fde735bd7535ed98b2cf64b6d2c4cf65f368d1588e5514238f697be" exitCode=0 Dec 11 10:50:17 crc kubenswrapper[5016]: I1211 10:50:17.298595 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" event={"ID":"148dba01-3eb3-49e5-8662-3824d9933a4c","Type":"ContainerDied","Data":"b19326759fde735bd7535ed98b2cf64b6d2c4cf65f368d1588e5514238f697be"} Dec 11 10:50:18 crc kubenswrapper[5016]: I1211 10:50:18.310762 5016 generic.go:334] "Generic (PLEG): container finished" podID="148dba01-3eb3-49e5-8662-3824d9933a4c" containerID="8326f46ea3d997e53ba99116e4fd9ae8af15ad096ce942c33797eff81491ff87" exitCode=0 Dec 11 10:50:18 crc kubenswrapper[5016]: I1211 10:50:18.310892 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" event={"ID":"148dba01-3eb3-49e5-8662-3824d9933a4c","Type":"ContainerDied","Data":"8326f46ea3d997e53ba99116e4fd9ae8af15ad096ce942c33797eff81491ff87"} Dec 11 10:50:19 crc kubenswrapper[5016]: I1211 10:50:19.557956 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:19 crc kubenswrapper[5016]: I1211 10:50:19.648990 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/148dba01-3eb3-49e5-8662-3824d9933a4c-bundle\") pod \"148dba01-3eb3-49e5-8662-3824d9933a4c\" (UID: \"148dba01-3eb3-49e5-8662-3824d9933a4c\") " Dec 11 10:50:19 crc kubenswrapper[5016]: I1211 10:50:19.649082 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jz5b\" (UniqueName: \"kubernetes.io/projected/148dba01-3eb3-49e5-8662-3824d9933a4c-kube-api-access-7jz5b\") pod \"148dba01-3eb3-49e5-8662-3824d9933a4c\" (UID: \"148dba01-3eb3-49e5-8662-3824d9933a4c\") " Dec 11 10:50:19 crc kubenswrapper[5016]: I1211 10:50:19.649160 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/148dba01-3eb3-49e5-8662-3824d9933a4c-util\") pod \"148dba01-3eb3-49e5-8662-3824d9933a4c\" (UID: \"148dba01-3eb3-49e5-8662-3824d9933a4c\") " Dec 11 10:50:19 crc kubenswrapper[5016]: I1211 10:50:19.650919 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/148dba01-3eb3-49e5-8662-3824d9933a4c-bundle" (OuterVolumeSpecName: "bundle") pod "148dba01-3eb3-49e5-8662-3824d9933a4c" (UID: "148dba01-3eb3-49e5-8662-3824d9933a4c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:50:19 crc kubenswrapper[5016]: I1211 10:50:19.654737 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/148dba01-3eb3-49e5-8662-3824d9933a4c-kube-api-access-7jz5b" (OuterVolumeSpecName: "kube-api-access-7jz5b") pod "148dba01-3eb3-49e5-8662-3824d9933a4c" (UID: "148dba01-3eb3-49e5-8662-3824d9933a4c"). InnerVolumeSpecName "kube-api-access-7jz5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:50:19 crc kubenswrapper[5016]: I1211 10:50:19.664421 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/148dba01-3eb3-49e5-8662-3824d9933a4c-util" (OuterVolumeSpecName: "util") pod "148dba01-3eb3-49e5-8662-3824d9933a4c" (UID: "148dba01-3eb3-49e5-8662-3824d9933a4c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:50:19 crc kubenswrapper[5016]: I1211 10:50:19.750436 5016 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/148dba01-3eb3-49e5-8662-3824d9933a4c-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:50:19 crc kubenswrapper[5016]: I1211 10:50:19.750483 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jz5b\" (UniqueName: \"kubernetes.io/projected/148dba01-3eb3-49e5-8662-3824d9933a4c-kube-api-access-7jz5b\") on node \"crc\" DevicePath \"\"" Dec 11 10:50:19 crc kubenswrapper[5016]: I1211 10:50:19.750497 5016 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/148dba01-3eb3-49e5-8662-3824d9933a4c-util\") on node \"crc\" DevicePath \"\"" Dec 11 10:50:20 crc kubenswrapper[5016]: I1211 10:50:20.325385 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" event={"ID":"148dba01-3eb3-49e5-8662-3824d9933a4c","Type":"ContainerDied","Data":"452d9bf53991ed44cbdf01daeb93665548effa65d5206c7a82cee7121d402d94"} Dec 11 10:50:20 crc kubenswrapper[5016]: I1211 10:50:20.325436 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="452d9bf53991ed44cbdf01daeb93665548effa65d5206c7a82cee7121d402d94" Dec 11 10:50:20 crc kubenswrapper[5016]: I1211 10:50:20.325480 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.891607 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt"] Dec 11 10:50:30 crc kubenswrapper[5016]: E1211 10:50:30.892334 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="148dba01-3eb3-49e5-8662-3824d9933a4c" containerName="util" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.892348 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="148dba01-3eb3-49e5-8662-3824d9933a4c" containerName="util" Dec 11 10:50:30 crc kubenswrapper[5016]: E1211 10:50:30.892360 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="148dba01-3eb3-49e5-8662-3824d9933a4c" containerName="pull" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.892366 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="148dba01-3eb3-49e5-8662-3824d9933a4c" containerName="pull" Dec 11 10:50:30 crc kubenswrapper[5016]: E1211 10:50:30.892377 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="148dba01-3eb3-49e5-8662-3824d9933a4c" containerName="extract" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.892384 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="148dba01-3eb3-49e5-8662-3824d9933a4c" containerName="extract" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.892477 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="148dba01-3eb3-49e5-8662-3824d9933a4c" containerName="extract" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.892930 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.907629 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-ns5d8" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.907813 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.908305 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.908505 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.908658 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.916943 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt"] Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.995986 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bdd623de-6c7c-46b2-a168-fabbbf16ce6c-apiservice-cert\") pod \"metallb-operator-controller-manager-6fcfbcfbcf-dfwbt\" (UID: \"bdd623de-6c7c-46b2-a168-fabbbf16ce6c\") " pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.996234 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bdd623de-6c7c-46b2-a168-fabbbf16ce6c-webhook-cert\") pod \"metallb-operator-controller-manager-6fcfbcfbcf-dfwbt\" (UID: \"bdd623de-6c7c-46b2-a168-fabbbf16ce6c\") " pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:50:30 crc kubenswrapper[5016]: I1211 10:50:30.996442 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dpzc\" (UniqueName: \"kubernetes.io/projected/bdd623de-6c7c-46b2-a168-fabbbf16ce6c-kube-api-access-4dpzc\") pod \"metallb-operator-controller-manager-6fcfbcfbcf-dfwbt\" (UID: \"bdd623de-6c7c-46b2-a168-fabbbf16ce6c\") " pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.097913 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bdd623de-6c7c-46b2-a168-fabbbf16ce6c-apiservice-cert\") pod \"metallb-operator-controller-manager-6fcfbcfbcf-dfwbt\" (UID: \"bdd623de-6c7c-46b2-a168-fabbbf16ce6c\") " pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.098113 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bdd623de-6c7c-46b2-a168-fabbbf16ce6c-webhook-cert\") pod \"metallb-operator-controller-manager-6fcfbcfbcf-dfwbt\" (UID: \"bdd623de-6c7c-46b2-a168-fabbbf16ce6c\") " pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.098181 5016 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dpzc\" (UniqueName: \"kubernetes.io/projected/bdd623de-6c7c-46b2-a168-fabbbf16ce6c-kube-api-access-4dpzc\") pod \"metallb-operator-controller-manager-6fcfbcfbcf-dfwbt\" (UID: \"bdd623de-6c7c-46b2-a168-fabbbf16ce6c\") " pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.107616 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bdd623de-6c7c-46b2-a168-fabbbf16ce6c-apiservice-cert\") pod \"metallb-operator-controller-manager-6fcfbcfbcf-dfwbt\" (UID: \"bdd623de-6c7c-46b2-a168-fabbbf16ce6c\") " pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.117344 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bdd623de-6c7c-46b2-a168-fabbbf16ce6c-webhook-cert\") pod \"metallb-operator-controller-manager-6fcfbcfbcf-dfwbt\" (UID: \"bdd623de-6c7c-46b2-a168-fabbbf16ce6c\") " pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.121740 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dpzc\" (UniqueName: \"kubernetes.io/projected/bdd623de-6c7c-46b2-a168-fabbbf16ce6c-kube-api-access-4dpzc\") pod \"metallb-operator-controller-manager-6fcfbcfbcf-dfwbt\" (UID: \"bdd623de-6c7c-46b2-a168-fabbbf16ce6c\") " pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.210095 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.234553 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c896697b-mstrs"] Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.237585 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.239800 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-zj5jw" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.240471 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.240711 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.254405 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c896697b-mstrs"] Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.403031 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8b488ea8-aba6-430a-bb39-d1459ef2edea-apiservice-cert\") pod \"metallb-operator-webhook-server-7c896697b-mstrs\" (UID: \"8b488ea8-aba6-430a-bb39-d1459ef2edea\") " pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.403586 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8b488ea8-aba6-430a-bb39-d1459ef2edea-webhook-cert\") pod \"metallb-operator-webhook-server-7c896697b-mstrs\" (UID: \"8b488ea8-aba6-430a-bb39-d1459ef2edea\") " pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.403634 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmxzm\" (UniqueName: \"kubernetes.io/projected/8b488ea8-aba6-430a-bb39-d1459ef2edea-kube-api-access-qmxzm\") pod \"metallb-operator-webhook-server-7c896697b-mstrs\" (UID: \"8b488ea8-aba6-430a-bb39-d1459ef2edea\") " pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.505223 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8b488ea8-aba6-430a-bb39-d1459ef2edea-webhook-cert\") pod \"metallb-operator-webhook-server-7c896697b-mstrs\" (UID: \"8b488ea8-aba6-430a-bb39-d1459ef2edea\") " pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.505270 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8b488ea8-aba6-430a-bb39-d1459ef2edea-apiservice-cert\") pod \"metallb-operator-webhook-server-7c896697b-mstrs\" (UID: \"8b488ea8-aba6-430a-bb39-d1459ef2edea\") " pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.505293 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmxzm\" (UniqueName: \"kubernetes.io/projected/8b488ea8-aba6-430a-bb39-d1459ef2edea-kube-api-access-qmxzm\") pod \"metallb-operator-webhook-server-7c896697b-mstrs\" (UID: \"8b488ea8-aba6-430a-bb39-d1459ef2edea\") " pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.510623 5016 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8b488ea8-aba6-430a-bb39-d1459ef2edea-apiservice-cert\") pod \"metallb-operator-webhook-server-7c896697b-mstrs\" (UID: \"8b488ea8-aba6-430a-bb39-d1459ef2edea\") " pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.510781 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8b488ea8-aba6-430a-bb39-d1459ef2edea-webhook-cert\") pod \"metallb-operator-webhook-server-7c896697b-mstrs\" (UID: \"8b488ea8-aba6-430a-bb39-d1459ef2edea\") " pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.527931 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmxzm\" (UniqueName: \"kubernetes.io/projected/8b488ea8-aba6-430a-bb39-d1459ef2edea-kube-api-access-qmxzm\") pod \"metallb-operator-webhook-server-7c896697b-mstrs\" (UID: \"8b488ea8-aba6-430a-bb39-d1459ef2edea\") " pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.535114 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt"] Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.586179 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:50:31 crc kubenswrapper[5016]: I1211 10:50:31.836896 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7c896697b-mstrs"] Dec 11 10:50:31 crc kubenswrapper[5016]: W1211 10:50:31.844538 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b488ea8_aba6_430a_bb39_d1459ef2edea.slice/crio-db4bd79f61c67acaa422042c5eddef23bc53048c4268b165009c1be55fb38b21 WatchSource:0}: Error finding container db4bd79f61c67acaa422042c5eddef23bc53048c4268b165009c1be55fb38b21: Status 404 returned error can't find the container with id db4bd79f61c67acaa422042c5eddef23bc53048c4268b165009c1be55fb38b21 Dec 11 10:50:32 crc kubenswrapper[5016]: I1211 10:50:32.403451 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" event={"ID":"8b488ea8-aba6-430a-bb39-d1459ef2edea","Type":"ContainerStarted","Data":"db4bd79f61c67acaa422042c5eddef23bc53048c4268b165009c1be55fb38b21"} Dec 11 10:50:32 crc kubenswrapper[5016]: I1211 10:50:32.405161 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" event={"ID":"bdd623de-6c7c-46b2-a168-fabbbf16ce6c","Type":"ContainerStarted","Data":"b29fdb2f2d37dcae5083dbb1ce76509a3f74670eb213dd28b31ec7b45191fc17"} Dec 11 10:50:36 crc kubenswrapper[5016]: I1211 10:50:36.441489 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" event={"ID":"bdd623de-6c7c-46b2-a168-fabbbf16ce6c","Type":"ContainerStarted","Data":"8ae087b3be1feffced0bbfe10078f8cd2658c2e1e01c0285c182b39374d74dd4"} Dec 11 10:50:36 crc kubenswrapper[5016]: I1211 10:50:36.442548 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:50:36 crc kubenswrapper[5016]: I1211 10:50:36.464885 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" podStartSLOduration=2.982004456 podStartE2EDuration="6.464864938s" podCreationTimestamp="2025-12-11 10:50:30 +0000 UTC" firstStartedPulling="2025-12-11 10:50:31.543391488 +0000 UTC m=+948.361951067" lastFinishedPulling="2025-12-11 10:50:35.02625196 +0000 UTC m=+951.844811549" observedRunningTime="2025-12-11 10:50:36.463053723 +0000 UTC m=+953.281613312" watchObservedRunningTime="2025-12-11 10:50:36.464864938 +0000 UTC m=+953.283424507" Dec 11 10:50:44 crc kubenswrapper[5016]: I1211 10:50:44.515370 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" event={"ID":"8b488ea8-aba6-430a-bb39-d1459ef2edea","Type":"ContainerStarted","Data":"4f931c21bc8c162041be11644f729bf4f601a91a24049be79fe40201b3841b42"} Dec 11 10:50:44 crc kubenswrapper[5016]: I1211 10:50:44.516445 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:50:44 crc kubenswrapper[5016]: I1211 10:50:44.537458 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" podStartSLOduration=1.023850594 podStartE2EDuration="13.537435692s" podCreationTimestamp="2025-12-11 10:50:31 +0000 UTC" firstStartedPulling="2025-12-11 10:50:31.847531494 +0000 UTC m=+948.666091073" lastFinishedPulling="2025-12-11 10:50:44.361116592 +0000 UTC m=+961.179676171" observedRunningTime="2025-12-11 10:50:44.535821292 +0000 UTC m=+961.354380871" watchObservedRunningTime="2025-12-11 10:50:44.537435692 +0000 UTC m=+961.355995281" Dec 11 10:51:01 crc kubenswrapper[5016]: I1211 10:51:01.591841 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-7c896697b-mstrs" Dec 11 10:51:11 crc kubenswrapper[5016]: I1211 10:51:11.215110 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6fcfbcfbcf-dfwbt" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.035249 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-dflh8"] Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.039257 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.042202 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.042249 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.046929 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-l4jrt" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.049237 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8"] Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.050421 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.062509 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8"] Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.068204 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.074904 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/ab16d65b-bdbe-4988-9c13-5e0b91c72217-reloader\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.075018 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/ab16d65b-bdbe-4988-9c13-5e0b91c72217-metrics\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.075046 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/ab16d65b-bdbe-4988-9c13-5e0b91c72217-frr-sockets\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.075151 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab16d65b-bdbe-4988-9c13-5e0b91c72217-metrics-certs\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.075209 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwjqr\" (UniqueName: \"kubernetes.io/projected/ab16d65b-bdbe-4988-9c13-5e0b91c72217-kube-api-access-bwjqr\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.075249 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/ab16d65b-bdbe-4988-9c13-5e0b91c72217-frr-startup\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.075265 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/ab16d65b-bdbe-4988-9c13-5e0b91c72217-frr-conf\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.155692 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-tdtwb"] Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.157096 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-tdtwb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.159158 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-9k52n" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.159920 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.160026 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.160479 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.167784 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-5bddd4b946-skxpb"] Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.169146 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-5bddd4b946-skxpb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.170918 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.176399 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab16d65b-bdbe-4988-9c13-5e0b91c72217-metrics-certs\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.176458 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwjqr\" (UniqueName: \"kubernetes.io/projected/ab16d65b-bdbe-4988-9c13-5e0b91c72217-kube-api-access-bwjqr\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.176488 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/ab16d65b-bdbe-4988-9c13-5e0b91c72217-frr-startup\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.176517 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/ab16d65b-bdbe-4988-9c13-5e0b91c72217-frr-conf\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.176584 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qplc\" (UniqueName: \"kubernetes.io/projected/76b7a036-07e1-4a49-b5c4-39ed67ae34b6-kube-api-access-8qplc\") pod \"frr-k8s-webhook-server-7784b6fcf-4c5v8\" (UID: \"76b7a036-07e1-4a49-b5c4-39ed67ae34b6\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.176625 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/ab16d65b-bdbe-4988-9c13-5e0b91c72217-reloader\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: 
I1211 10:51:12.176662 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/76b7a036-07e1-4a49-b5c4-39ed67ae34b6-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-4c5v8\" (UID: \"76b7a036-07e1-4a49-b5c4-39ed67ae34b6\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.176687 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/ab16d65b-bdbe-4988-9c13-5e0b91c72217-metrics\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.176714 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/ab16d65b-bdbe-4988-9c13-5e0b91c72217-frr-sockets\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: E1211 10:51:12.176976 5016 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Dec 11 10:51:12 crc kubenswrapper[5016]: E1211 10:51:12.177065 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab16d65b-bdbe-4988-9c13-5e0b91c72217-metrics-certs podName:ab16d65b-bdbe-4988-9c13-5e0b91c72217 nodeName:}" failed. No retries permitted until 2025-12-11 10:51:12.677034843 +0000 UTC m=+989.495594422 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ab16d65b-bdbe-4988-9c13-5e0b91c72217-metrics-certs") pod "frr-k8s-dflh8" (UID: "ab16d65b-bdbe-4988-9c13-5e0b91c72217") : secret "frr-k8s-certs-secret" not found Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.177606 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/ab16d65b-bdbe-4988-9c13-5e0b91c72217-frr-sockets\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.177768 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/ab16d65b-bdbe-4988-9c13-5e0b91c72217-metrics\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.177873 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/ab16d65b-bdbe-4988-9c13-5e0b91c72217-frr-conf\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.178051 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/ab16d65b-bdbe-4988-9c13-5e0b91c72217-reloader\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.178454 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/ab16d65b-bdbe-4988-9c13-5e0b91c72217-frr-startup\") pod \"frr-k8s-dflh8\" (UID: 
\"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.198104 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5bddd4b946-skxpb"] Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.206214 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwjqr\" (UniqueName: \"kubernetes.io/projected/ab16d65b-bdbe-4988-9c13-5e0b91c72217-kube-api-access-bwjqr\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.278231 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgprg\" (UniqueName: \"kubernetes.io/projected/c3691778-17ce-4c44-b8e1-f9f5a6727778-kube-api-access-sgprg\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.278332 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/76b7a036-07e1-4a49-b5c4-39ed67ae34b6-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-4c5v8\" (UID: \"76b7a036-07e1-4a49-b5c4-39ed67ae34b6\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.278357 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-memberlist\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.278491 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c4c659bc-4572-4852-8008-231dc642bbd7-metrics-certs\") pod \"controller-5bddd4b946-skxpb\" (UID: \"c4c659bc-4572-4852-8008-231dc642bbd7\") " pod="metallb-system/controller-5bddd4b946-skxpb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.278718 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/c3691778-17ce-4c44-b8e1-f9f5a6727778-metallb-excludel2\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.278756 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-metrics-certs\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.278801 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjdwl\" (UniqueName: \"kubernetes.io/projected/c4c659bc-4572-4852-8008-231dc642bbd7-kube-api-access-rjdwl\") pod \"controller-5bddd4b946-skxpb\" (UID: \"c4c659bc-4572-4852-8008-231dc642bbd7\") " pod="metallb-system/controller-5bddd4b946-skxpb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.278823 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" 
(UniqueName: \"kubernetes.io/secret/c4c659bc-4572-4852-8008-231dc642bbd7-cert\") pod \"controller-5bddd4b946-skxpb\" (UID: \"c4c659bc-4572-4852-8008-231dc642bbd7\") " pod="metallb-system/controller-5bddd4b946-skxpb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.278905 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qplc\" (UniqueName: \"kubernetes.io/projected/76b7a036-07e1-4a49-b5c4-39ed67ae34b6-kube-api-access-8qplc\") pod \"frr-k8s-webhook-server-7784b6fcf-4c5v8\" (UID: \"76b7a036-07e1-4a49-b5c4-39ed67ae34b6\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.285866 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/76b7a036-07e1-4a49-b5c4-39ed67ae34b6-cert\") pod \"frr-k8s-webhook-server-7784b6fcf-4c5v8\" (UID: \"76b7a036-07e1-4a49-b5c4-39ed67ae34b6\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.313361 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qplc\" (UniqueName: \"kubernetes.io/projected/76b7a036-07e1-4a49-b5c4-39ed67ae34b6-kube-api-access-8qplc\") pod \"frr-k8s-webhook-server-7784b6fcf-4c5v8\" (UID: \"76b7a036-07e1-4a49-b5c4-39ed67ae34b6\") " pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.373232 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.380426 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgprg\" (UniqueName: \"kubernetes.io/projected/c3691778-17ce-4c44-b8e1-f9f5a6727778-kube-api-access-sgprg\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.380524 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-memberlist\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.380561 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c4c659bc-4572-4852-8008-231dc642bbd7-metrics-certs\") pod \"controller-5bddd4b946-skxpb\" (UID: \"c4c659bc-4572-4852-8008-231dc642bbd7\") " pod="metallb-system/controller-5bddd4b946-skxpb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.380612 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/c3691778-17ce-4c44-b8e1-f9f5a6727778-metallb-excludel2\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.380635 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-metrics-certs\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 
10:51:12.380676 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjdwl\" (UniqueName: \"kubernetes.io/projected/c4c659bc-4572-4852-8008-231dc642bbd7-kube-api-access-rjdwl\") pod \"controller-5bddd4b946-skxpb\" (UID: \"c4c659bc-4572-4852-8008-231dc642bbd7\") " pod="metallb-system/controller-5bddd4b946-skxpb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.380701 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c4c659bc-4572-4852-8008-231dc642bbd7-cert\") pod \"controller-5bddd4b946-skxpb\" (UID: \"c4c659bc-4572-4852-8008-231dc642bbd7\") " pod="metallb-system/controller-5bddd4b946-skxpb" Dec 11 10:51:12 crc kubenswrapper[5016]: E1211 10:51:12.381421 5016 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Dec 11 10:51:12 crc kubenswrapper[5016]: E1211 10:51:12.381499 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-memberlist podName:c3691778-17ce-4c44-b8e1-f9f5a6727778 nodeName:}" failed. No retries permitted until 2025-12-11 10:51:12.881475722 +0000 UTC m=+989.700035301 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-memberlist") pod "speaker-tdtwb" (UID: "c3691778-17ce-4c44-b8e1-f9f5a6727778") : secret "metallb-memberlist" not found Dec 11 10:51:12 crc kubenswrapper[5016]: E1211 10:51:12.382891 5016 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Dec 11 10:51:12 crc kubenswrapper[5016]: E1211 10:51:12.383007 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-metrics-certs podName:c3691778-17ce-4c44-b8e1-f9f5a6727778 nodeName:}" failed. No retries permitted until 2025-12-11 10:51:12.882984549 +0000 UTC m=+989.701544128 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-metrics-certs") pod "speaker-tdtwb" (UID: "c3691778-17ce-4c44-b8e1-f9f5a6727778") : secret "speaker-certs-secret" not found Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.383433 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/c3691778-17ce-4c44-b8e1-f9f5a6727778-metallb-excludel2\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.384587 5016 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.389598 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c4c659bc-4572-4852-8008-231dc642bbd7-metrics-certs\") pod \"controller-5bddd4b946-skxpb\" (UID: \"c4c659bc-4572-4852-8008-231dc642bbd7\") " pod="metallb-system/controller-5bddd4b946-skxpb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.399704 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c4c659bc-4572-4852-8008-231dc642bbd7-cert\") pod \"controller-5bddd4b946-skxpb\" (UID: \"c4c659bc-4572-4852-8008-231dc642bbd7\") " pod="metallb-system/controller-5bddd4b946-skxpb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.403450 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjdwl\" (UniqueName: \"kubernetes.io/projected/c4c659bc-4572-4852-8008-231dc642bbd7-kube-api-access-rjdwl\") pod \"controller-5bddd4b946-skxpb\" (UID: \"c4c659bc-4572-4852-8008-231dc642bbd7\") " pod="metallb-system/controller-5bddd4b946-skxpb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.404369 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgprg\" (UniqueName: \"kubernetes.io/projected/c3691778-17ce-4c44-b8e1-f9f5a6727778-kube-api-access-sgprg\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb" Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.498476 5016 util.go:30] "No sandbox for pod can be found. 
Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.498476 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-5bddd4b946-skxpb"
Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.643033 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8"]
Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.685726 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab16d65b-bdbe-4988-9c13-5e0b91c72217-metrics-certs\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8"
Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.690453 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab16d65b-bdbe-4988-9c13-5e0b91c72217-metrics-certs\") pod \"frr-k8s-dflh8\" (UID: \"ab16d65b-bdbe-4988-9c13-5e0b91c72217\") " pod="metallb-system/frr-k8s-dflh8"
Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.695974 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8" event={"ID":"76b7a036-07e1-4a49-b5c4-39ed67ae34b6","Type":"ContainerStarted","Data":"0620bc63511aeeb9c2cd79937a727cc75cc6458abe9f53cb023aeb02dba36b45"}
Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.766208 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-5bddd4b946-skxpb"]
Dec 11 10:51:12 crc kubenswrapper[5016]: W1211 10:51:12.768853 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc4c659bc_4572_4852_8008_231dc642bbd7.slice/crio-a88054b4c5b7e4313f45931c2d8903472ada8a28b82866a22291e0a58afb6d22 WatchSource:0}: Error finding container a88054b4c5b7e4313f45931c2d8903472ada8a28b82866a22291e0a58afb6d22: Status 404 returned error can't find the container with id a88054b4c5b7e4313f45931c2d8903472ada8a28b82866a22291e0a58afb6d22
Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.889111 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-memberlist\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb"
Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.889703 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-metrics-certs\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb"
Dec 11 10:51:12 crc kubenswrapper[5016]: E1211 10:51:12.889302 5016 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Dec 11 10:51:12 crc kubenswrapper[5016]: E1211 10:51:12.890311 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-memberlist podName:c3691778-17ce-4c44-b8e1-f9f5a6727778 nodeName:}" failed. No retries permitted until 2025-12-11 10:51:13.890281719 +0000 UTC m=+990.708841448 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-memberlist") pod "speaker-tdtwb" (UID: "c3691778-17ce-4c44-b8e1-f9f5a6727778") : secret "metallb-memberlist" not found
Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.897993 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-metrics-certs\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb"
Dec 11 10:51:12 crc kubenswrapper[5016]: I1211 10:51:12.970339 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-dflh8"
Dec 11 10:51:13 crc kubenswrapper[5016]: I1211 10:51:13.708246 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-skxpb" event={"ID":"c4c659bc-4572-4852-8008-231dc642bbd7","Type":"ContainerStarted","Data":"b7655005b804135b45ec20a42989bdc740115457cea746552415da21480abdba"}
Dec 11 10:51:13 crc kubenswrapper[5016]: I1211 10:51:13.708798 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-skxpb" event={"ID":"c4c659bc-4572-4852-8008-231dc642bbd7","Type":"ContainerStarted","Data":"b3adf81395720e588aa23268044bec93bb686fac025cd09770655274bf47dd6d"}
Dec 11 10:51:13 crc kubenswrapper[5016]: I1211 10:51:13.708817 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-5bddd4b946-skxpb" event={"ID":"c4c659bc-4572-4852-8008-231dc642bbd7","Type":"ContainerStarted","Data":"a88054b4c5b7e4313f45931c2d8903472ada8a28b82866a22291e0a58afb6d22"}
Dec 11 10:51:13 crc kubenswrapper[5016]: I1211 10:51:13.710208 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-5bddd4b946-skxpb"
Dec 11 10:51:13 crc kubenswrapper[5016]: I1211 10:51:13.713501 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dflh8" event={"ID":"ab16d65b-bdbe-4988-9c13-5e0b91c72217","Type":"ContainerStarted","Data":"d7fc336f3d2f7f2866e767bb8c06b910e0ca6eaabe2d214a1cf499a182faaacb"}
Dec 11 10:51:13 crc kubenswrapper[5016]: I1211 10:51:13.727784 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-5bddd4b946-skxpb" podStartSLOduration=1.727702731 podStartE2EDuration="1.727702731s" podCreationTimestamp="2025-12-11 10:51:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:51:13.727497066 +0000 UTC m=+990.546056665" watchObservedRunningTime="2025-12-11 10:51:13.727702731 +0000 UTC m=+990.546262310"
Dec 11 10:51:13 crc kubenswrapper[5016]: I1211 10:51:13.904555 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-memberlist\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb"
Dec 11 10:51:13 crc kubenswrapper[5016]: I1211 10:51:13.908388 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/c3691778-17ce-4c44-b8e1-f9f5a6727778-memberlist\") pod \"speaker-tdtwb\" (UID: \"c3691778-17ce-4c44-b8e1-f9f5a6727778\") " pod="metallb-system/speaker-tdtwb"
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-tdtwb" Dec 11 10:51:13 crc kubenswrapper[5016]: W1211 10:51:13.998747 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc3691778_17ce_4c44_b8e1_f9f5a6727778.slice/crio-b4c5fc736380b5e53f7bb6e97570280ebdde8d8df2500b40fdaa6daf66f0a28d WatchSource:0}: Error finding container b4c5fc736380b5e53f7bb6e97570280ebdde8d8df2500b40fdaa6daf66f0a28d: Status 404 returned error can't find the container with id b4c5fc736380b5e53f7bb6e97570280ebdde8d8df2500b40fdaa6daf66f0a28d Dec 11 10:51:14 crc kubenswrapper[5016]: I1211 10:51:14.728202 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tdtwb" event={"ID":"c3691778-17ce-4c44-b8e1-f9f5a6727778","Type":"ContainerStarted","Data":"58704a5e534efcd2764619ab456065086c5fa04202f0998cc10fc9a3141b48fa"} Dec 11 10:51:14 crc kubenswrapper[5016]: I1211 10:51:14.728508 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tdtwb" event={"ID":"c3691778-17ce-4c44-b8e1-f9f5a6727778","Type":"ContainerStarted","Data":"c40b683fa600fd8549cc7981d291b0d4481293205988285c7038f25ed3bdf1bb"} Dec 11 10:51:14 crc kubenswrapper[5016]: I1211 10:51:14.728520 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tdtwb" event={"ID":"c3691778-17ce-4c44-b8e1-f9f5a6727778","Type":"ContainerStarted","Data":"b4c5fc736380b5e53f7bb6e97570280ebdde8d8df2500b40fdaa6daf66f0a28d"} Dec 11 10:51:14 crc kubenswrapper[5016]: I1211 10:51:14.729074 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-tdtwb" Dec 11 10:51:14 crc kubenswrapper[5016]: I1211 10:51:14.767678 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-tdtwb" podStartSLOduration=2.7676580250000002 podStartE2EDuration="2.767658025s" podCreationTimestamp="2025-12-11 10:51:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:51:14.756142502 +0000 UTC m=+991.574702081" watchObservedRunningTime="2025-12-11 10:51:14.767658025 +0000 UTC m=+991.586217604" Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.016474 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-57shs"] Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.020128 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-57shs" Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.035538 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-57shs"] Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.110560 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vd2r6\" (UniqueName: \"kubernetes.io/projected/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-kube-api-access-vd2r6\") pod \"redhat-marketplace-57shs\" (UID: \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\") " pod="openshift-marketplace/redhat-marketplace-57shs" Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.110631 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-catalog-content\") pod \"redhat-marketplace-57shs\" (UID: \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\") " pod="openshift-marketplace/redhat-marketplace-57shs" Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.110730 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-utilities\") pod \"redhat-marketplace-57shs\" (UID: \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\") " pod="openshift-marketplace/redhat-marketplace-57shs" Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.212400 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-utilities\") pod \"redhat-marketplace-57shs\" (UID: \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\") " pod="openshift-marketplace/redhat-marketplace-57shs" Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.212525 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vd2r6\" (UniqueName: \"kubernetes.io/projected/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-kube-api-access-vd2r6\") pod \"redhat-marketplace-57shs\" (UID: \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\") " pod="openshift-marketplace/redhat-marketplace-57shs" Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.212567 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-catalog-content\") pod \"redhat-marketplace-57shs\" (UID: \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\") " pod="openshift-marketplace/redhat-marketplace-57shs" Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.213185 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-utilities\") pod \"redhat-marketplace-57shs\" (UID: \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\") " pod="openshift-marketplace/redhat-marketplace-57shs" Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.213218 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-catalog-content\") pod \"redhat-marketplace-57shs\" (UID: \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\") " pod="openshift-marketplace/redhat-marketplace-57shs" Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.242278 5016 operation_generator.go:637] "MountVolume.SetUp 
Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.242278 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vd2r6\" (UniqueName: \"kubernetes.io/projected/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-kube-api-access-vd2r6\") pod \"redhat-marketplace-57shs\" (UID: \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\") " pod="openshift-marketplace/redhat-marketplace-57shs"
Dec 11 10:51:19 crc kubenswrapper[5016]: I1211 10:51:19.348302 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-57shs"
Dec 11 10:51:21 crc kubenswrapper[5016]: I1211 10:51:21.526563 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-57shs"]
Dec 11 10:51:21 crc kubenswrapper[5016]: I1211 10:51:21.790238 5016 generic.go:334] "Generic (PLEG): container finished" podID="5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" containerID="bf3b6be1d028ad5de3d8cacf269c742af8a98d81834d5861d12af6c619f617e0" exitCode=0
Dec 11 10:51:21 crc kubenswrapper[5016]: I1211 10:51:21.790360 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57shs" event={"ID":"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744","Type":"ContainerDied","Data":"bf3b6be1d028ad5de3d8cacf269c742af8a98d81834d5861d12af6c619f617e0"}
Dec 11 10:51:21 crc kubenswrapper[5016]: I1211 10:51:21.790815 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57shs" event={"ID":"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744","Type":"ContainerStarted","Data":"9e5dc370796bf214cde54564f74ef356ca18b4da06d5b4729031b5103d565e50"}
Dec 11 10:51:21 crc kubenswrapper[5016]: I1211 10:51:21.793128 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8" event={"ID":"76b7a036-07e1-4a49-b5c4-39ed67ae34b6","Type":"ContainerStarted","Data":"18a195769e3dbba8ba073a572bb53fe1a15208287f02f615e6f0eea5e503274e"}
Dec 11 10:51:21 crc kubenswrapper[5016]: I1211 10:51:21.793291 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8"
Dec 11 10:51:21 crc kubenswrapper[5016]: I1211 10:51:21.794884 5016 generic.go:334] "Generic (PLEG): container finished" podID="ab16d65b-bdbe-4988-9c13-5e0b91c72217" containerID="0669454de2c625a73b77f14c48f19d95fe60670e0e1e1a0bbe6fddbefe958094" exitCode=0
Dec 11 10:51:21 crc kubenswrapper[5016]: I1211 10:51:21.794905 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dflh8" event={"ID":"ab16d65b-bdbe-4988-9c13-5e0b91c72217","Type":"ContainerDied","Data":"0669454de2c625a73b77f14c48f19d95fe60670e0e1e1a0bbe6fddbefe958094"}
Dec 11 10:51:21 crc kubenswrapper[5016]: I1211 10:51:21.858988 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8" podStartSLOduration=1.15650955 podStartE2EDuration="9.85896612s" podCreationTimestamp="2025-12-11 10:51:12 +0000 UTC" firstStartedPulling="2025-12-11 10:51:12.666590917 +0000 UTC m=+989.485150496" lastFinishedPulling="2025-12-11 10:51:21.369047487 +0000 UTC m=+998.187607066" observedRunningTime="2025-12-11 10:51:21.832734484 +0000 UTC m=+998.651294063" watchObservedRunningTime="2025-12-11 10:51:21.85896612 +0000 UTC m=+998.677525709"
Dec 11 10:51:22 crc kubenswrapper[5016]: I1211 10:51:22.504218 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-5bddd4b946-skxpb"
Dec 11 10:51:22 crc kubenswrapper[5016]: I1211 10:51:22.811336 5016 generic.go:334] "Generic (PLEG): container finished" podID="ab16d65b-bdbe-4988-9c13-5e0b91c72217" containerID="4100f89fe313b354e646b533b9a71b3899449ae4b75577df1f5615e76af03732" exitCode=0
Dec 11 10:51:22 crc kubenswrapper[5016]: I1211 10:51:22.811461 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dflh8" event={"ID":"ab16d65b-bdbe-4988-9c13-5e0b91c72217","Type":"ContainerDied","Data":"4100f89fe313b354e646b533b9a71b3899449ae4b75577df1f5615e76af03732"}
Dec 11 10:51:23 crc kubenswrapper[5016]: I1211 10:51:23.821171 5016 generic.go:334] "Generic (PLEG): container finished" podID="5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" containerID="6d15fa7748010dc5129f7895fb5fcd4a56140f56188815fba955881660495547" exitCode=0
Dec 11 10:51:23 crc kubenswrapper[5016]: I1211 10:51:23.821228 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57shs" event={"ID":"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744","Type":"ContainerDied","Data":"6d15fa7748010dc5129f7895fb5fcd4a56140f56188815fba955881660495547"}
Dec 11 10:51:23 crc kubenswrapper[5016]: I1211 10:51:23.824377 5016 generic.go:334] "Generic (PLEG): container finished" podID="ab16d65b-bdbe-4988-9c13-5e0b91c72217" containerID="0b56d3e2f6f9c5b837163c6a5e7fa0ae28034c7edbb6c45c97fa192ec216652e" exitCode=0
Dec 11 10:51:23 crc kubenswrapper[5016]: I1211 10:51:23.824439 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dflh8" event={"ID":"ab16d65b-bdbe-4988-9c13-5e0b91c72217","Type":"ContainerDied","Data":"0b56d3e2f6f9c5b837163c6a5e7fa0ae28034c7edbb6c45c97fa192ec216652e"}
Dec 11 10:51:24 crc kubenswrapper[5016]: I1211 10:51:24.834269 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dflh8" event={"ID":"ab16d65b-bdbe-4988-9c13-5e0b91c72217","Type":"ContainerStarted","Data":"f100eaee7670982d34364bc93e2cb6f3bac65df482ad8754b9a4df35dad8217a"}
Dec 11 10:51:24 crc kubenswrapper[5016]: I1211 10:51:24.834646 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dflh8" event={"ID":"ab16d65b-bdbe-4988-9c13-5e0b91c72217","Type":"ContainerStarted","Data":"b3c027d996b0ed79a1cffc9b335e704d1d5727a7b93b35c7f9dc718b5b51a1ba"}
Dec 11 10:51:24 crc kubenswrapper[5016]: I1211 10:51:24.834661 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dflh8" event={"ID":"ab16d65b-bdbe-4988-9c13-5e0b91c72217","Type":"ContainerStarted","Data":"7ed549e558f7e36ff4006ba1feebd6e8f426771a9abd3c62d7ac76476ad12ce2"}
Dec 11 10:51:26 crc kubenswrapper[5016]: I1211 10:51:26.849671 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57shs" event={"ID":"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744","Type":"ContainerStarted","Data":"87b02d484d346f8d27e93aee5e99442085913516127d718a4db5db75b1822a9b"}
Dec 11 10:51:26 crc kubenswrapper[5016]: I1211 10:51:26.853571 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dflh8" event={"ID":"ab16d65b-bdbe-4988-9c13-5e0b91c72217","Type":"ContainerStarted","Data":"b8dc89f65a3a253e2211fa8a4b454e1fb3b007d86ce0de611ba22ecf06e90597"}
Dec 11 10:51:26 crc kubenswrapper[5016]: I1211 10:51:26.853613 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dflh8" event={"ID":"ab16d65b-bdbe-4988-9c13-5e0b91c72217","Type":"ContainerStarted","Data":"01f064ad15136bf89c3c0d13500d1fcadaccad7ec9804271d71ec27d38609458"}
Dec 11 10:51:27 crc kubenswrapper[5016]: I1211 10:51:27.863576 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dflh8" event={"ID":"ab16d65b-bdbe-4988-9c13-5e0b91c72217","Type":"ContainerStarted","Data":"54f67e364851b9465beec9faa68048c7cdbbbf99dd76843f37d433bffc7a4406"}
Dec 11 10:51:27 crc kubenswrapper[5016]: I1211 10:51:27.893453 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-57shs" podStartSLOduration=5.545718475 podStartE2EDuration="9.893429365s" podCreationTimestamp="2025-12-11 10:51:18 +0000 UTC" firstStartedPulling="2025-12-11 10:51:21.792382121 +0000 UTC m=+998.610941690" lastFinishedPulling="2025-12-11 10:51:26.140093001 +0000 UTC m=+1002.958652580" observedRunningTime="2025-12-11 10:51:26.877501922 +0000 UTC m=+1003.696061511" watchObservedRunningTime="2025-12-11 10:51:27.893429365 +0000 UTC m=+1004.711988964"
Dec 11 10:51:27 crc kubenswrapper[5016]: I1211 10:51:27.971611 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-dflh8"
Dec 11 10:51:28 crc kubenswrapper[5016]: I1211 10:51:28.015865 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-dflh8"
Dec 11 10:51:28 crc kubenswrapper[5016]: I1211 10:51:28.053133 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-dflh8" podStartSLOduration=7.921003783 podStartE2EDuration="16.053115283s" podCreationTimestamp="2025-12-11 10:51:12 +0000 UTC" firstStartedPulling="2025-12-11 10:51:13.277859594 +0000 UTC m=+990.096419173" lastFinishedPulling="2025-12-11 10:51:21.409971094 +0000 UTC m=+998.228530673" observedRunningTime="2025-12-11 10:51:27.893032215 +0000 UTC m=+1004.711591824" watchObservedRunningTime="2025-12-11 10:51:28.053115283 +0000 UTC m=+1004.871674872"
Dec 11 10:51:28 crc kubenswrapper[5016]: I1211 10:51:28.869521 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-dflh8"
Dec 11 10:51:29 crc kubenswrapper[5016]: I1211 10:51:29.349485 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-57shs"
Dec 11 10:51:29 crc kubenswrapper[5016]: I1211 10:51:29.349776 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-57shs"
Dec 11 10:51:29 crc kubenswrapper[5016]: I1211 10:51:29.392279 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-57shs"
Dec 11 10:51:32 crc kubenswrapper[5016]: I1211 10:51:32.379466 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7784b6fcf-4c5v8"
Dec 11 10:51:33 crc kubenswrapper[5016]: I1211 10:51:33.982390 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-tdtwb"
Dec 11 10:51:35 crc kubenswrapper[5016]: I1211 10:51:35.828424 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l45qp"]
Dec 11 10:51:35 crc kubenswrapper[5016]: I1211 10:51:35.830027 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:35 crc kubenswrapper[5016]: I1211 10:51:35.853149 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l45qp"]
Dec 11 10:51:35 crc kubenswrapper[5016]: I1211 10:51:35.862972 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0cde5ac-3bc6-4ac7-981d-16c24e396978-catalog-content\") pod \"certified-operators-l45qp\" (UID: \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\") " pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:35 crc kubenswrapper[5016]: I1211 10:51:35.863018 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0cde5ac-3bc6-4ac7-981d-16c24e396978-utilities\") pod \"certified-operators-l45qp\" (UID: \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\") " pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:35 crc kubenswrapper[5016]: I1211 10:51:35.863114 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2s7fp\" (UniqueName: \"kubernetes.io/projected/e0cde5ac-3bc6-4ac7-981d-16c24e396978-kube-api-access-2s7fp\") pod \"certified-operators-l45qp\" (UID: \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\") " pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:35 crc kubenswrapper[5016]: I1211 10:51:35.964558 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0cde5ac-3bc6-4ac7-981d-16c24e396978-utilities\") pod \"certified-operators-l45qp\" (UID: \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\") " pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:35 crc kubenswrapper[5016]: I1211 10:51:35.964652 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0cde5ac-3bc6-4ac7-981d-16c24e396978-catalog-content\") pod \"certified-operators-l45qp\" (UID: \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\") " pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:35 crc kubenswrapper[5016]: I1211 10:51:35.964748 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2s7fp\" (UniqueName: \"kubernetes.io/projected/e0cde5ac-3bc6-4ac7-981d-16c24e396978-kube-api-access-2s7fp\") pod \"certified-operators-l45qp\" (UID: \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\") " pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:35 crc kubenswrapper[5016]: I1211 10:51:35.965763 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0cde5ac-3bc6-4ac7-981d-16c24e396978-utilities\") pod \"certified-operators-l45qp\" (UID: \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\") " pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:35 crc kubenswrapper[5016]: I1211 10:51:35.966132 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0cde5ac-3bc6-4ac7-981d-16c24e396978-catalog-content\") pod \"certified-operators-l45qp\" (UID: \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\") " pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:35 crc kubenswrapper[5016]: I1211 10:51:35.988534 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2s7fp\" (UniqueName: \"kubernetes.io/projected/e0cde5ac-3bc6-4ac7-981d-16c24e396978-kube-api-access-2s7fp\") pod \"certified-operators-l45qp\" (UID: \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\") " pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:36 crc kubenswrapper[5016]: I1211 10:51:36.152581 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:36 crc kubenswrapper[5016]: I1211 10:51:36.709749 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l45qp"]
Dec 11 10:51:36 crc kubenswrapper[5016]: I1211 10:51:36.931133 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l45qp" event={"ID":"e0cde5ac-3bc6-4ac7-981d-16c24e396978","Type":"ContainerStarted","Data":"f0068e172bbd7014e284d75674ed269a97a8100056d3ad004c7c6f511228ef70"}
Dec 11 10:51:38 crc kubenswrapper[5016]: I1211 10:51:38.951243 5016 generic.go:334] "Generic (PLEG): container finished" podID="e0cde5ac-3bc6-4ac7-981d-16c24e396978" containerID="d790d3a02b86a6777f617f484d3571bb685e9cf0578ac4ddfe7f684b0595d002" exitCode=0
Dec 11 10:51:38 crc kubenswrapper[5016]: I1211 10:51:38.951347 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l45qp" event={"ID":"e0cde5ac-3bc6-4ac7-981d-16c24e396978","Type":"ContainerDied","Data":"d790d3a02b86a6777f617f484d3571bb685e9cf0578ac4ddfe7f684b0595d002"}
Dec 11 10:51:39 crc kubenswrapper[5016]: I1211 10:51:39.404455 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-57shs"
Dec 11 10:51:39 crc kubenswrapper[5016]: I1211 10:51:39.961339 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l45qp" event={"ID":"e0cde5ac-3bc6-4ac7-981d-16c24e396978","Type":"ContainerStarted","Data":"a6f204b4c4fa65181a8362a8e17b75966469ab9dc0c3e660a9450b7830b58e52"}
Dec 11 10:51:40 crc kubenswrapper[5016]: I1211 10:51:40.812184 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-75cgw"]
Dec 11 10:51:40 crc kubenswrapper[5016]: I1211 10:51:40.813368 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-75cgw"
Dec 11 10:51:40 crc kubenswrapper[5016]: I1211 10:51:40.817126 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-prp55"
Dec 11 10:51:40 crc kubenswrapper[5016]: I1211 10:51:40.817193 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Dec 11 10:51:40 crc kubenswrapper[5016]: I1211 10:51:40.818869 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Dec 11 10:51:40 crc kubenswrapper[5016]: I1211 10:51:40.823189 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-75cgw"]
Dec 11 10:51:40 crc kubenswrapper[5016]: I1211 10:51:40.832214 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgfxx\" (UniqueName: \"kubernetes.io/projected/3c73a135-7e40-4ba1-a674-0259ba8677db-kube-api-access-lgfxx\") pod \"openstack-operator-index-75cgw\" (UID: \"3c73a135-7e40-4ba1-a674-0259ba8677db\") " pod="openstack-operators/openstack-operator-index-75cgw"
Dec 11 10:51:40 crc kubenswrapper[5016]: I1211 10:51:40.933598 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgfxx\" (UniqueName: \"kubernetes.io/projected/3c73a135-7e40-4ba1-a674-0259ba8677db-kube-api-access-lgfxx\") pod \"openstack-operator-index-75cgw\" (UID: \"3c73a135-7e40-4ba1-a674-0259ba8677db\") " pod="openstack-operators/openstack-operator-index-75cgw"
Dec 11 10:51:40 crc kubenswrapper[5016]: I1211 10:51:40.956444 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgfxx\" (UniqueName: \"kubernetes.io/projected/3c73a135-7e40-4ba1-a674-0259ba8677db-kube-api-access-lgfxx\") pod \"openstack-operator-index-75cgw\" (UID: \"3c73a135-7e40-4ba1-a674-0259ba8677db\") " pod="openstack-operators/openstack-operator-index-75cgw"
Dec 11 10:51:40 crc kubenswrapper[5016]: I1211 10:51:40.974292 5016 generic.go:334] "Generic (PLEG): container finished" podID="e0cde5ac-3bc6-4ac7-981d-16c24e396978" containerID="a6f204b4c4fa65181a8362a8e17b75966469ab9dc0c3e660a9450b7830b58e52" exitCode=0
Dec 11 10:51:40 crc kubenswrapper[5016]: I1211 10:51:40.974364 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l45qp" event={"ID":"e0cde5ac-3bc6-4ac7-981d-16c24e396978","Type":"ContainerDied","Data":"a6f204b4c4fa65181a8362a8e17b75966469ab9dc0c3e660a9450b7830b58e52"}
Dec 11 10:51:41 crc kubenswrapper[5016]: I1211 10:51:41.128066 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-75cgw"
Dec 11 10:51:41 crc kubenswrapper[5016]: I1211 10:51:41.554976 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-75cgw"]
Dec 11 10:51:41 crc kubenswrapper[5016]: W1211 10:51:41.566429 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c73a135_7e40_4ba1_a674_0259ba8677db.slice/crio-e9289d9480c8e8c496a8d7258bbc91238010bc23e1c918cb16fc9211e1482bdc WatchSource:0}: Error finding container e9289d9480c8e8c496a8d7258bbc91238010bc23e1c918cb16fc9211e1482bdc: Status 404 returned error can't find the container with id e9289d9480c8e8c496a8d7258bbc91238010bc23e1c918cb16fc9211e1482bdc
Dec 11 10:51:41 crc kubenswrapper[5016]: I1211 10:51:41.987405 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l45qp" event={"ID":"e0cde5ac-3bc6-4ac7-981d-16c24e396978","Type":"ContainerStarted","Data":"65c83c747a21a88005110e1b83d82c9598e5cc7351606abee743aa65fb00da29"}
Dec 11 10:51:41 crc kubenswrapper[5016]: I1211 10:51:41.989205 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-75cgw" event={"ID":"3c73a135-7e40-4ba1-a674-0259ba8677db","Type":"ContainerStarted","Data":"e9289d9480c8e8c496a8d7258bbc91238010bc23e1c918cb16fc9211e1482bdc"}
Dec 11 10:51:42 crc kubenswrapper[5016]: I1211 10:51:42.016588 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l45qp" podStartSLOduration=4.562163719 podStartE2EDuration="7.0165386s" podCreationTimestamp="2025-12-11 10:51:35 +0000 UTC" firstStartedPulling="2025-12-11 10:51:38.953636569 +0000 UTC m=+1015.772196148" lastFinishedPulling="2025-12-11 10:51:41.40801145 +0000 UTC m=+1018.226571029" observedRunningTime="2025-12-11 10:51:42.012913151 +0000 UTC m=+1018.831472750" watchObservedRunningTime="2025-12-11 10:51:42.0165386 +0000 UTC m=+1018.835098179"
Dec 11 10:51:42 crc kubenswrapper[5016]: I1211 10:51:42.933154 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:51:42 crc kubenswrapper[5016]: I1211 10:51:42.933237 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:51:42 crc kubenswrapper[5016]: I1211 10:51:42.975645 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-dflh8"
Dec 11 10:51:44 crc kubenswrapper[5016]: I1211 10:51:44.600318 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-57shs"]
Dec 11 10:51:44 crc kubenswrapper[5016]: I1211 10:51:44.600879 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-57shs" podUID="5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" containerName="registry-server" containerID="cri-o://87b02d484d346f8d27e93aee5e99442085913516127d718a4db5db75b1822a9b" gracePeriod=2
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.012754 5016 generic.go:334] "Generic (PLEG): container finished" podID="5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" containerID="87b02d484d346f8d27e93aee5e99442085913516127d718a4db5db75b1822a9b" exitCode=0
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.013329 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57shs" event={"ID":"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744","Type":"ContainerDied","Data":"87b02d484d346f8d27e93aee5e99442085913516127d718a4db5db75b1822a9b"}
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.013374 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-57shs" event={"ID":"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744","Type":"ContainerDied","Data":"9e5dc370796bf214cde54564f74ef356ca18b4da06d5b4729031b5103d565e50"}
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.013388 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e5dc370796bf214cde54564f74ef356ca18b4da06d5b4729031b5103d565e50"
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.023529 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-75cgw" event={"ID":"3c73a135-7e40-4ba1-a674-0259ba8677db","Type":"ContainerStarted","Data":"aebef4ed713e726e99f2f5017a9497bff659deb3869e52475265808855327d78"}
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.024775 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-57shs"
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.043677 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-75cgw" podStartSLOduration=2.372337043 podStartE2EDuration="5.043657081s" podCreationTimestamp="2025-12-11 10:51:40 +0000 UTC" firstStartedPulling="2025-12-11 10:51:41.570175349 +0000 UTC m=+1018.388734938" lastFinishedPulling="2025-12-11 10:51:44.241495397 +0000 UTC m=+1021.060054976" observedRunningTime="2025-12-11 10:51:45.03954885 +0000 UTC m=+1021.858108429" watchObservedRunningTime="2025-12-11 10:51:45.043657081 +0000 UTC m=+1021.862216660"
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.208014 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-catalog-content\") pod \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\" (UID: \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\") "
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.208255 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-utilities\") pod \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\" (UID: \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\") "
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.208381 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vd2r6\" (UniqueName: \"kubernetes.io/projected/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-kube-api-access-vd2r6\") pod \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\" (UID: \"5fc3391e-2726-4e32-8d9f-f0d6f5f6b744\") "
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.209060 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-utilities" (OuterVolumeSpecName: "utilities") pod "5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" (UID: "5fc3391e-2726-4e32-8d9f-f0d6f5f6b744"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.215575 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-kube-api-access-vd2r6" (OuterVolumeSpecName: "kube-api-access-vd2r6") pod "5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" (UID: "5fc3391e-2726-4e32-8d9f-f0d6f5f6b744"). InnerVolumeSpecName "kube-api-access-vd2r6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.229628 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" (UID: "5fc3391e-2726-4e32-8d9f-f0d6f5f6b744"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.309505 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.309557 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vd2r6\" (UniqueName: \"kubernetes.io/projected/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-kube-api-access-vd2r6\") on node \"crc\" DevicePath \"\""
Dec 11 10:51:45 crc kubenswrapper[5016]: I1211 10:51:45.309607 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 10:51:46 crc kubenswrapper[5016]: I1211 10:51:46.028884 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-57shs"
Dec 11 10:51:46 crc kubenswrapper[5016]: I1211 10:51:46.052205 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-57shs"]
Dec 11 10:51:46 crc kubenswrapper[5016]: I1211 10:51:46.057359 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-57shs"]
Dec 11 10:51:46 crc kubenswrapper[5016]: I1211 10:51:46.152763 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:46 crc kubenswrapper[5016]: I1211 10:51:46.152820 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:46 crc kubenswrapper[5016]: I1211 10:51:46.191706 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:47 crc kubenswrapper[5016]: I1211 10:51:47.070621 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:47 crc kubenswrapper[5016]: I1211 10:51:47.481334 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" path="/var/lib/kubelet/pods/5fc3391e-2726-4e32-8d9f-f0d6f5f6b744/volumes"
Dec 11 10:51:49 crc kubenswrapper[5016]: I1211 10:51:49.406004 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l45qp"]
Dec 11 10:51:49 crc kubenswrapper[5016]: I1211 10:51:49.407157 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l45qp" podUID="e0cde5ac-3bc6-4ac7-981d-16c24e396978" containerName="registry-server" containerID="cri-o://65c83c747a21a88005110e1b83d82c9598e5cc7351606abee743aa65fb00da29" gracePeriod=2
Dec 11 10:51:51 crc kubenswrapper[5016]: I1211 10:51:51.129294 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-75cgw"
Dec 11 10:51:51 crc kubenswrapper[5016]: I1211 10:51:51.129628 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-75cgw"
Dec 11 10:51:51 crc kubenswrapper[5016]: I1211 10:51:51.162286 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-75cgw"
Dec 11 10:51:52 crc kubenswrapper[5016]: I1211 10:51:52.072523 5016 generic.go:334] "Generic (PLEG): container finished" podID="e0cde5ac-3bc6-4ac7-981d-16c24e396978" containerID="65c83c747a21a88005110e1b83d82c9598e5cc7351606abee743aa65fb00da29" exitCode=0
Dec 11 10:51:52 crc kubenswrapper[5016]: I1211 10:51:52.072627 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l45qp" event={"ID":"e0cde5ac-3bc6-4ac7-981d-16c24e396978","Type":"ContainerDied","Data":"65c83c747a21a88005110e1b83d82c9598e5cc7351606abee743aa65fb00da29"}
Dec 11 10:51:52 crc kubenswrapper[5016]: I1211 10:51:52.106861 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-75cgw"
Dec 11 10:51:52 crc kubenswrapper[5016]: I1211 10:51:52.515214 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l45qp"
Dec 11 10:51:52 crc kubenswrapper[5016]: I1211 10:51:52.619041 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2s7fp\" (UniqueName: \"kubernetes.io/projected/e0cde5ac-3bc6-4ac7-981d-16c24e396978-kube-api-access-2s7fp\") pod \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\" (UID: \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\") "
Dec 11 10:51:52 crc kubenswrapper[5016]: I1211 10:51:52.619141 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0cde5ac-3bc6-4ac7-981d-16c24e396978-catalog-content\") pod \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\" (UID: \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\") "
Dec 11 10:51:52 crc kubenswrapper[5016]: I1211 10:51:52.619176 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0cde5ac-3bc6-4ac7-981d-16c24e396978-utilities\") pod \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\" (UID: \"e0cde5ac-3bc6-4ac7-981d-16c24e396978\") "
Dec 11 10:51:52 crc kubenswrapper[5016]: I1211 10:51:52.620478 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0cde5ac-3bc6-4ac7-981d-16c24e396978-utilities" (OuterVolumeSpecName: "utilities") pod "e0cde5ac-3bc6-4ac7-981d-16c24e396978" (UID: "e0cde5ac-3bc6-4ac7-981d-16c24e396978"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:51:52 crc kubenswrapper[5016]: I1211 10:51:52.626145 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0cde5ac-3bc6-4ac7-981d-16c24e396978-kube-api-access-2s7fp" (OuterVolumeSpecName: "kube-api-access-2s7fp") pod "e0cde5ac-3bc6-4ac7-981d-16c24e396978" (UID: "e0cde5ac-3bc6-4ac7-981d-16c24e396978"). InnerVolumeSpecName "kube-api-access-2s7fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:51:52 crc kubenswrapper[5016]: I1211 10:51:52.721294 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2s7fp\" (UniqueName: \"kubernetes.io/projected/e0cde5ac-3bc6-4ac7-981d-16c24e396978-kube-api-access-2s7fp\") on node \"crc\" DevicePath \"\"" Dec 11 10:51:52 crc kubenswrapper[5016]: I1211 10:51:52.721329 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0cde5ac-3bc6-4ac7-981d-16c24e396978-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:51:52 crc kubenswrapper[5016]: I1211 10:51:52.721341 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0cde5ac-3bc6-4ac7-981d-16c24e396978-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:51:53 crc kubenswrapper[5016]: I1211 10:51:53.082428 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l45qp" event={"ID":"e0cde5ac-3bc6-4ac7-981d-16c24e396978","Type":"ContainerDied","Data":"f0068e172bbd7014e284d75674ed269a97a8100056d3ad004c7c6f511228ef70"} Dec 11 10:51:53 crc kubenswrapper[5016]: I1211 10:51:53.082492 5016 scope.go:117] "RemoveContainer" containerID="65c83c747a21a88005110e1b83d82c9598e5cc7351606abee743aa65fb00da29" Dec 11 10:51:53 crc kubenswrapper[5016]: I1211 10:51:53.082516 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l45qp" Dec 11 10:51:53 crc kubenswrapper[5016]: I1211 10:51:53.114544 5016 scope.go:117] "RemoveContainer" containerID="a6f204b4c4fa65181a8362a8e17b75966469ab9dc0c3e660a9450b7830b58e52" Dec 11 10:51:53 crc kubenswrapper[5016]: I1211 10:51:53.116374 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l45qp"] Dec 11 10:51:53 crc kubenswrapper[5016]: I1211 10:51:53.121590 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l45qp"] Dec 11 10:51:53 crc kubenswrapper[5016]: I1211 10:51:53.166235 5016 scope.go:117] "RemoveContainer" containerID="d790d3a02b86a6777f617f484d3571bb685e9cf0578ac4ddfe7f684b0595d002" Dec 11 10:51:53 crc kubenswrapper[5016]: I1211 10:51:53.486376 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0cde5ac-3bc6-4ac7-981d-16c24e396978" path="/var/lib/kubelet/pods/e0cde5ac-3bc6-4ac7-981d-16c24e396978/volumes" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.058217 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl"] Dec 11 10:51:55 crc kubenswrapper[5016]: E1211 10:51:55.058551 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0cde5ac-3bc6-4ac7-981d-16c24e396978" containerName="extract-content" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.058570 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0cde5ac-3bc6-4ac7-981d-16c24e396978" containerName="extract-content" Dec 11 10:51:55 crc kubenswrapper[5016]: E1211 10:51:55.058590 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" containerName="extract-utilities" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.058600 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" containerName="extract-utilities" Dec 11 10:51:55 crc 
kubenswrapper[5016]: E1211 10:51:55.058618 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0cde5ac-3bc6-4ac7-981d-16c24e396978" containerName="registry-server" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.058629 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0cde5ac-3bc6-4ac7-981d-16c24e396978" containerName="registry-server" Dec 11 10:51:55 crc kubenswrapper[5016]: E1211 10:51:55.058649 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0cde5ac-3bc6-4ac7-981d-16c24e396978" containerName="extract-utilities" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.058658 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0cde5ac-3bc6-4ac7-981d-16c24e396978" containerName="extract-utilities" Dec 11 10:51:55 crc kubenswrapper[5016]: E1211 10:51:55.058680 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" containerName="extract-content" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.058690 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" containerName="extract-content" Dec 11 10:51:55 crc kubenswrapper[5016]: E1211 10:51:55.058714 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" containerName="registry-server" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.058724 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" containerName="registry-server" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.058903 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="5fc3391e-2726-4e32-8d9f-f0d6f5f6b744" containerName="registry-server" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.058927 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0cde5ac-3bc6-4ac7-981d-16c24e396978" containerName="registry-server" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.060291 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.064806 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-5smv7" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.073428 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl"] Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.176557 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9w74w\" (UniqueName: \"kubernetes.io/projected/34f1fbe7-6974-4320-8365-3b047d159e3a-kube-api-access-9w74w\") pod \"2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl\" (UID: \"34f1fbe7-6974-4320-8365-3b047d159e3a\") " pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.176641 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/34f1fbe7-6974-4320-8365-3b047d159e3a-util\") pod \"2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl\" (UID: \"34f1fbe7-6974-4320-8365-3b047d159e3a\") " pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.176687 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/34f1fbe7-6974-4320-8365-3b047d159e3a-bundle\") pod \"2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl\" (UID: \"34f1fbe7-6974-4320-8365-3b047d159e3a\") " pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.277639 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9w74w\" (UniqueName: \"kubernetes.io/projected/34f1fbe7-6974-4320-8365-3b047d159e3a-kube-api-access-9w74w\") pod \"2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl\" (UID: \"34f1fbe7-6974-4320-8365-3b047d159e3a\") " pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.277695 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/34f1fbe7-6974-4320-8365-3b047d159e3a-util\") pod \"2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl\" (UID: \"34f1fbe7-6974-4320-8365-3b047d159e3a\") " pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.277716 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/34f1fbe7-6974-4320-8365-3b047d159e3a-bundle\") pod \"2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl\" (UID: \"34f1fbe7-6974-4320-8365-3b047d159e3a\") " pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.278269 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/34f1fbe7-6974-4320-8365-3b047d159e3a-bundle\") pod \"2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl\" (UID: \"34f1fbe7-6974-4320-8365-3b047d159e3a\") " pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.278446 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/34f1fbe7-6974-4320-8365-3b047d159e3a-util\") pod \"2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl\" (UID: \"34f1fbe7-6974-4320-8365-3b047d159e3a\") " pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.300929 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9w74w\" (UniqueName: \"kubernetes.io/projected/34f1fbe7-6974-4320-8365-3b047d159e3a-kube-api-access-9w74w\") pod \"2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl\" (UID: \"34f1fbe7-6974-4320-8365-3b047d159e3a\") " pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.381599 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:51:55 crc kubenswrapper[5016]: I1211 10:51:55.615077 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl"] Dec 11 10:51:56 crc kubenswrapper[5016]: I1211 10:51:56.103911 5016 generic.go:334] "Generic (PLEG): container finished" podID="34f1fbe7-6974-4320-8365-3b047d159e3a" containerID="b21291631a9e96f96b463e6b406febc3c267f07bb9cbe8969c8ea2bd18b9c2ff" exitCode=0 Dec 11 10:51:56 crc kubenswrapper[5016]: I1211 10:51:56.103978 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" event={"ID":"34f1fbe7-6974-4320-8365-3b047d159e3a","Type":"ContainerDied","Data":"b21291631a9e96f96b463e6b406febc3c267f07bb9cbe8969c8ea2bd18b9c2ff"} Dec 11 10:51:56 crc kubenswrapper[5016]: I1211 10:51:56.104276 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" event={"ID":"34f1fbe7-6974-4320-8365-3b047d159e3a","Type":"ContainerStarted","Data":"daef1eaa0963b12c363af6588bab7b0a74fe71040ebcecba994a956a92d344e2"} Dec 11 10:51:57 crc kubenswrapper[5016]: I1211 10:51:57.113548 5016 generic.go:334] "Generic (PLEG): container finished" podID="34f1fbe7-6974-4320-8365-3b047d159e3a" containerID="851b008cd588ed510d2a8cf0dc29f43f0e47456841e2940bffc24d0537b1588c" exitCode=0 Dec 11 10:51:57 crc kubenswrapper[5016]: I1211 10:51:57.113617 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" event={"ID":"34f1fbe7-6974-4320-8365-3b047d159e3a","Type":"ContainerDied","Data":"851b008cd588ed510d2a8cf0dc29f43f0e47456841e2940bffc24d0537b1588c"} Dec 11 10:51:58 crc kubenswrapper[5016]: I1211 10:51:58.124919 5016 generic.go:334] "Generic (PLEG): container finished" podID="34f1fbe7-6974-4320-8365-3b047d159e3a" containerID="5a0f6e74cc5b38a0aca3485653542c85f1d20ebe6599efc2ea76a69f97feae27" exitCode=0 Dec 11 10:51:58 crc kubenswrapper[5016]: I1211 10:51:58.125062 5016 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" event={"ID":"34f1fbe7-6974-4320-8365-3b047d159e3a","Type":"ContainerDied","Data":"5a0f6e74cc5b38a0aca3485653542c85f1d20ebe6599efc2ea76a69f97feae27"} Dec 11 10:51:59 crc kubenswrapper[5016]: I1211 10:51:59.428846 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:51:59 crc kubenswrapper[5016]: I1211 10:51:59.544095 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/34f1fbe7-6974-4320-8365-3b047d159e3a-bundle\") pod \"34f1fbe7-6974-4320-8365-3b047d159e3a\" (UID: \"34f1fbe7-6974-4320-8365-3b047d159e3a\") " Dec 11 10:51:59 crc kubenswrapper[5016]: I1211 10:51:59.544580 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/34f1fbe7-6974-4320-8365-3b047d159e3a-util\") pod \"34f1fbe7-6974-4320-8365-3b047d159e3a\" (UID: \"34f1fbe7-6974-4320-8365-3b047d159e3a\") " Dec 11 10:51:59 crc kubenswrapper[5016]: I1211 10:51:59.544863 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9w74w\" (UniqueName: \"kubernetes.io/projected/34f1fbe7-6974-4320-8365-3b047d159e3a-kube-api-access-9w74w\") pod \"34f1fbe7-6974-4320-8365-3b047d159e3a\" (UID: \"34f1fbe7-6974-4320-8365-3b047d159e3a\") " Dec 11 10:51:59 crc kubenswrapper[5016]: I1211 10:51:59.545963 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34f1fbe7-6974-4320-8365-3b047d159e3a-bundle" (OuterVolumeSpecName: "bundle") pod "34f1fbe7-6974-4320-8365-3b047d159e3a" (UID: "34f1fbe7-6974-4320-8365-3b047d159e3a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:51:59 crc kubenswrapper[5016]: I1211 10:51:59.553127 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34f1fbe7-6974-4320-8365-3b047d159e3a-kube-api-access-9w74w" (OuterVolumeSpecName: "kube-api-access-9w74w") pod "34f1fbe7-6974-4320-8365-3b047d159e3a" (UID: "34f1fbe7-6974-4320-8365-3b047d159e3a"). InnerVolumeSpecName "kube-api-access-9w74w". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:51:59 crc kubenswrapper[5016]: I1211 10:51:59.558810 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34f1fbe7-6974-4320-8365-3b047d159e3a-util" (OuterVolumeSpecName: "util") pod "34f1fbe7-6974-4320-8365-3b047d159e3a" (UID: "34f1fbe7-6974-4320-8365-3b047d159e3a"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:51:59 crc kubenswrapper[5016]: I1211 10:51:59.647226 5016 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/34f1fbe7-6974-4320-8365-3b047d159e3a-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:51:59 crc kubenswrapper[5016]: I1211 10:51:59.647298 5016 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/34f1fbe7-6974-4320-8365-3b047d159e3a-util\") on node \"crc\" DevicePath \"\"" Dec 11 10:51:59 crc kubenswrapper[5016]: I1211 10:51:59.647324 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9w74w\" (UniqueName: \"kubernetes.io/projected/34f1fbe7-6974-4320-8365-3b047d159e3a-kube-api-access-9w74w\") on node \"crc\" DevicePath \"\"" Dec 11 10:52:00 crc kubenswrapper[5016]: I1211 10:52:00.145781 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" event={"ID":"34f1fbe7-6974-4320-8365-3b047d159e3a","Type":"ContainerDied","Data":"daef1eaa0963b12c363af6588bab7b0a74fe71040ebcecba994a956a92d344e2"} Dec 11 10:52:00 crc kubenswrapper[5016]: I1211 10:52:00.146273 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="daef1eaa0963b12c363af6588bab7b0a74fe71040ebcecba994a956a92d344e2" Dec 11 10:52:00 crc kubenswrapper[5016]: I1211 10:52:00.145881 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl" Dec 11 10:52:03 crc kubenswrapper[5016]: I1211 10:52:03.643534 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m"] Dec 11 10:52:03 crc kubenswrapper[5016]: E1211 10:52:03.644138 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34f1fbe7-6974-4320-8365-3b047d159e3a" containerName="util" Dec 11 10:52:03 crc kubenswrapper[5016]: I1211 10:52:03.644150 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="34f1fbe7-6974-4320-8365-3b047d159e3a" containerName="util" Dec 11 10:52:03 crc kubenswrapper[5016]: E1211 10:52:03.644167 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34f1fbe7-6974-4320-8365-3b047d159e3a" containerName="pull" Dec 11 10:52:03 crc kubenswrapper[5016]: I1211 10:52:03.644172 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="34f1fbe7-6974-4320-8365-3b047d159e3a" containerName="pull" Dec 11 10:52:03 crc kubenswrapper[5016]: E1211 10:52:03.644183 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34f1fbe7-6974-4320-8365-3b047d159e3a" containerName="extract" Dec 11 10:52:03 crc kubenswrapper[5016]: I1211 10:52:03.644191 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="34f1fbe7-6974-4320-8365-3b047d159e3a" containerName="extract" Dec 11 10:52:03 crc kubenswrapper[5016]: I1211 10:52:03.644297 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="34f1fbe7-6974-4320-8365-3b047d159e3a" containerName="extract" Dec 11 10:52:03 crc kubenswrapper[5016]: I1211 10:52:03.644686 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m" Dec 11 10:52:03 crc kubenswrapper[5016]: I1211 10:52:03.650531 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-vjh2c" Dec 11 10:52:03 crc kubenswrapper[5016]: I1211 10:52:03.683046 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m"] Dec 11 10:52:03 crc kubenswrapper[5016]: I1211 10:52:03.711837 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hggzg\" (UniqueName: \"kubernetes.io/projected/2c43efae-bdbc-4043-b4fc-6e04c5f95003-kube-api-access-hggzg\") pod \"openstack-operator-controller-operator-b7dd9c5f4-ktl4m\" (UID: \"2c43efae-bdbc-4043-b4fc-6e04c5f95003\") " pod="openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m" Dec 11 10:52:03 crc kubenswrapper[5016]: I1211 10:52:03.813082 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hggzg\" (UniqueName: \"kubernetes.io/projected/2c43efae-bdbc-4043-b4fc-6e04c5f95003-kube-api-access-hggzg\") pod \"openstack-operator-controller-operator-b7dd9c5f4-ktl4m\" (UID: \"2c43efae-bdbc-4043-b4fc-6e04c5f95003\") " pod="openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m" Dec 11 10:52:03 crc kubenswrapper[5016]: I1211 10:52:03.846278 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hggzg\" (UniqueName: \"kubernetes.io/projected/2c43efae-bdbc-4043-b4fc-6e04c5f95003-kube-api-access-hggzg\") pod \"openstack-operator-controller-operator-b7dd9c5f4-ktl4m\" (UID: \"2c43efae-bdbc-4043-b4fc-6e04c5f95003\") " pod="openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m" Dec 11 10:52:03 crc kubenswrapper[5016]: I1211 10:52:03.960926 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m" Dec 11 10:52:04 crc kubenswrapper[5016]: I1211 10:52:04.328431 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m"] Dec 11 10:52:05 crc kubenswrapper[5016]: I1211 10:52:05.190794 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m" event={"ID":"2c43efae-bdbc-4043-b4fc-6e04c5f95003","Type":"ContainerStarted","Data":"3a09632320db826bc263dc8173f685772d703265ab425a8d817e2e10a3377157"} Dec 11 10:52:09 crc kubenswrapper[5016]: I1211 10:52:09.223758 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m" event={"ID":"2c43efae-bdbc-4043-b4fc-6e04c5f95003","Type":"ContainerStarted","Data":"911087a988939e12d7b5e4c629c7d01e1c5b2d955ae8c308a9e309422a55085a"} Dec 11 10:52:09 crc kubenswrapper[5016]: I1211 10:52:09.226696 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m" Dec 11 10:52:09 crc kubenswrapper[5016]: I1211 10:52:09.270606 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m" podStartSLOduration=2.2275002 podStartE2EDuration="6.270576084s" podCreationTimestamp="2025-12-11 10:52:03 +0000 UTC" firstStartedPulling="2025-12-11 10:52:04.33852478 +0000 UTC m=+1041.157084359" lastFinishedPulling="2025-12-11 10:52:08.381600664 +0000 UTC m=+1045.200160243" observedRunningTime="2025-12-11 10:52:09.264194627 +0000 UTC m=+1046.082754216" watchObservedRunningTime="2025-12-11 10:52:09.270576084 +0000 UTC m=+1046.089135673" Dec 11 10:52:12 crc kubenswrapper[5016]: I1211 10:52:12.932845 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:52:12 crc kubenswrapper[5016]: I1211 10:52:12.933168 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.008909 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lj24n"] Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.010368 5016 util.go:30] "No sandbox for pod can be found. 
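The pod_startup_latency_tracker entry above is internally consistent: watchObservedRunningTime minus podCreationTimestamp gives the reported podStartE2EDuration (10:52:09.270576084 - 10:52:03 = 6.270576084s), and subtracting the image-pull window (lastFinishedPulling - firstStartedPulling = 4.043075884s) yields the reported podStartSLOduration of 2.2275002s. A minimal sketch re-deriving that from the logged timestamps is below; the mustParse helper is illustrative, not kubelet code.

// slo_check.go - re-derives the kubelet's reported podStartSLOduration for
// the ktl4m pod from the timestamps in the log entry above:
// SLO duration = (running - created) - (pull end - pull start).
package main

import (
	"fmt"
	"time"
)

func mustParse(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-12-11 10:52:03 +0000 UTC")
	pullStart := mustParse("2025-12-11 10:52:04.33852478 +0000 UTC")
	pullEnd := mustParse("2025-12-11 10:52:08.381600664 +0000 UTC")
	running := mustParse("2025-12-11 10:52:09.270576084 +0000 UTC")

	e2e := running.Sub(created)
	pull := pullEnd.Sub(pullStart)
	fmt.Println("podStartE2EDuration:", e2e)      // 6.270576084s
	fmt.Println("podStartSLOduration:", e2e-pull) // 2.2275002s
}

The same relationship holds for the community-operators-lj24n entry further down (5.305838711s - 2.459294112s = 2.846544599).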
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.010368 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.063186 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lj24n"]
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.167962 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e936ee06-f638-4024-8026-04201d3f6116-catalog-content\") pod \"community-operators-lj24n\" (UID: \"e936ee06-f638-4024-8026-04201d3f6116\") " pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.168014 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e936ee06-f638-4024-8026-04201d3f6116-utilities\") pod \"community-operators-lj24n\" (UID: \"e936ee06-f638-4024-8026-04201d3f6116\") " pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.168179 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fk7x\" (UniqueName: \"kubernetes.io/projected/e936ee06-f638-4024-8026-04201d3f6116-kube-api-access-5fk7x\") pod \"community-operators-lj24n\" (UID: \"e936ee06-f638-4024-8026-04201d3f6116\") " pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.270220 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fk7x\" (UniqueName: \"kubernetes.io/projected/e936ee06-f638-4024-8026-04201d3f6116-kube-api-access-5fk7x\") pod \"community-operators-lj24n\" (UID: \"e936ee06-f638-4024-8026-04201d3f6116\") " pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.270315 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e936ee06-f638-4024-8026-04201d3f6116-catalog-content\") pod \"community-operators-lj24n\" (UID: \"e936ee06-f638-4024-8026-04201d3f6116\") " pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.270343 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e936ee06-f638-4024-8026-04201d3f6116-utilities\") pod \"community-operators-lj24n\" (UID: \"e936ee06-f638-4024-8026-04201d3f6116\") " pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.270904 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e936ee06-f638-4024-8026-04201d3f6116-utilities\") pod \"community-operators-lj24n\" (UID: \"e936ee06-f638-4024-8026-04201d3f6116\") " pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.270929 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e936ee06-f638-4024-8026-04201d3f6116-catalog-content\") pod \"community-operators-lj24n\" (UID: \"e936ee06-f638-4024-8026-04201d3f6116\") " pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.297252 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fk7x\" (UniqueName: \"kubernetes.io/projected/e936ee06-f638-4024-8026-04201d3f6116-kube-api-access-5fk7x\") pod \"community-operators-lj24n\" (UID: \"e936ee06-f638-4024-8026-04201d3f6116\") " pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.336969 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.632412 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lj24n"]
Dec 11 10:52:13 crc kubenswrapper[5016]: I1211 10:52:13.965525 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-b7dd9c5f4-ktl4m"
Dec 11 10:52:14 crc kubenswrapper[5016]: I1211 10:52:14.261351 5016 generic.go:334] "Generic (PLEG): container finished" podID="e936ee06-f638-4024-8026-04201d3f6116" containerID="d0a98fc7285ac0c7aceacbfa6243650e0df12d7523d96d5ffac253bd22540975" exitCode=0
Dec 11 10:52:14 crc kubenswrapper[5016]: I1211 10:52:14.261622 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj24n" event={"ID":"e936ee06-f638-4024-8026-04201d3f6116","Type":"ContainerDied","Data":"d0a98fc7285ac0c7aceacbfa6243650e0df12d7523d96d5ffac253bd22540975"}
Dec 11 10:52:14 crc kubenswrapper[5016]: I1211 10:52:14.261753 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj24n" event={"ID":"e936ee06-f638-4024-8026-04201d3f6116","Type":"ContainerStarted","Data":"3c6849348ad297f9f5682d1d436071030584065d3af8457fbd5cc1b903d32d7f"}
Dec 11 10:52:15 crc kubenswrapper[5016]: I1211 10:52:15.270308 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj24n" event={"ID":"e936ee06-f638-4024-8026-04201d3f6116","Type":"ContainerStarted","Data":"0edbe913f072f64da440b2cd0a93f1a09c50b58068f730fcebd3e9735d281a62"}
Dec 11 10:52:16 crc kubenswrapper[5016]: I1211 10:52:16.280253 5016 generic.go:334] "Generic (PLEG): container finished" podID="e936ee06-f638-4024-8026-04201d3f6116" containerID="0edbe913f072f64da440b2cd0a93f1a09c50b58068f730fcebd3e9735d281a62" exitCode=0
Dec 11 10:52:16 crc kubenswrapper[5016]: I1211 10:52:16.280308 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj24n" event={"ID":"e936ee06-f638-4024-8026-04201d3f6116","Type":"ContainerDied","Data":"0edbe913f072f64da440b2cd0a93f1a09c50b58068f730fcebd3e9735d281a62"}
Dec 11 10:52:17 crc kubenswrapper[5016]: I1211 10:52:17.288069 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj24n" event={"ID":"e936ee06-f638-4024-8026-04201d3f6116","Type":"ContainerStarted","Data":"505022c117601d0f5c6cecb75056893d75c4840e660673c58b423a871f29dc36"}
Dec 11 10:52:17 crc kubenswrapper[5016]: I1211 10:52:17.305859 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lj24n" podStartSLOduration=2.846544599 podStartE2EDuration="5.305838711s" podCreationTimestamp="2025-12-11 10:52:12 +0000 UTC" firstStartedPulling="2025-12-11 10:52:14.264995043 +0000 UTC m=+1051.083554622" lastFinishedPulling="2025-12-11 10:52:16.724289155 +0000 UTC m=+1053.542848734" observedRunningTime="2025-12-11 10:52:17.304500188 +0000 UTC m=+1054.123059767" watchObservedRunningTime="2025-12-11 10:52:17.305838711 +0000 UTC m=+1054.124398290"
Dec 11 10:52:23 crc kubenswrapper[5016]: I1211 10:52:23.337816 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:23 crc kubenswrapper[5016]: I1211 10:52:23.338437 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:23 crc kubenswrapper[5016]: I1211 10:52:23.390878 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:24 crc kubenswrapper[5016]: I1211 10:52:24.385903 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:25 crc kubenswrapper[5016]: I1211 10:52:25.799261 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lj24n"]
Dec 11 10:52:26 crc kubenswrapper[5016]: I1211 10:52:26.346998 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lj24n" podUID="e936ee06-f638-4024-8026-04201d3f6116" containerName="registry-server" containerID="cri-o://505022c117601d0f5c6cecb75056893d75c4840e660673c58b423a871f29dc36" gracePeriod=2
Dec 11 10:52:28 crc kubenswrapper[5016]: I1211 10:52:28.372641 5016 generic.go:334] "Generic (PLEG): container finished" podID="e936ee06-f638-4024-8026-04201d3f6116" containerID="505022c117601d0f5c6cecb75056893d75c4840e660673c58b423a871f29dc36" exitCode=0
Dec 11 10:52:28 crc kubenswrapper[5016]: I1211 10:52:28.372711 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj24n" event={"ID":"e936ee06-f638-4024-8026-04201d3f6116","Type":"ContainerDied","Data":"505022c117601d0f5c6cecb75056893d75c4840e660673c58b423a871f29dc36"}
Dec 11 10:52:28 crc kubenswrapper[5016]: I1211 10:52:28.588697 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:28 crc kubenswrapper[5016]: I1211 10:52:28.626705 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e936ee06-f638-4024-8026-04201d3f6116-catalog-content\") pod \"e936ee06-f638-4024-8026-04201d3f6116\" (UID: \"e936ee06-f638-4024-8026-04201d3f6116\") "
Dec 11 10:52:28 crc kubenswrapper[5016]: I1211 10:52:28.626799 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fk7x\" (UniqueName: \"kubernetes.io/projected/e936ee06-f638-4024-8026-04201d3f6116-kube-api-access-5fk7x\") pod \"e936ee06-f638-4024-8026-04201d3f6116\" (UID: \"e936ee06-f638-4024-8026-04201d3f6116\") "
Dec 11 10:52:28 crc kubenswrapper[5016]: I1211 10:52:28.626879 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e936ee06-f638-4024-8026-04201d3f6116-utilities\") pod \"e936ee06-f638-4024-8026-04201d3f6116\" (UID: \"e936ee06-f638-4024-8026-04201d3f6116\") "
Dec 11 10:52:28 crc kubenswrapper[5016]: I1211 10:52:28.628271 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e936ee06-f638-4024-8026-04201d3f6116-utilities" (OuterVolumeSpecName: "utilities") pod "e936ee06-f638-4024-8026-04201d3f6116" (UID: "e936ee06-f638-4024-8026-04201d3f6116"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:52:28 crc kubenswrapper[5016]: I1211 10:52:28.638157 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e936ee06-f638-4024-8026-04201d3f6116-kube-api-access-5fk7x" (OuterVolumeSpecName: "kube-api-access-5fk7x") pod "e936ee06-f638-4024-8026-04201d3f6116" (UID: "e936ee06-f638-4024-8026-04201d3f6116"). InnerVolumeSpecName "kube-api-access-5fk7x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:52:28 crc kubenswrapper[5016]: I1211 10:52:28.688614 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e936ee06-f638-4024-8026-04201d3f6116-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e936ee06-f638-4024-8026-04201d3f6116" (UID: "e936ee06-f638-4024-8026-04201d3f6116"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:52:28 crc kubenswrapper[5016]: I1211 10:52:28.729360 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e936ee06-f638-4024-8026-04201d3f6116-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 10:52:28 crc kubenswrapper[5016]: I1211 10:52:28.729393 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fk7x\" (UniqueName: \"kubernetes.io/projected/e936ee06-f638-4024-8026-04201d3f6116-kube-api-access-5fk7x\") on node \"crc\" DevicePath \"\""
Dec 11 10:52:28 crc kubenswrapper[5016]: I1211 10:52:28.729405 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e936ee06-f638-4024-8026-04201d3f6116-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 10:52:29 crc kubenswrapper[5016]: I1211 10:52:29.382114 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj24n" event={"ID":"e936ee06-f638-4024-8026-04201d3f6116","Type":"ContainerDied","Data":"3c6849348ad297f9f5682d1d436071030584065d3af8457fbd5cc1b903d32d7f"}
Dec 11 10:52:29 crc kubenswrapper[5016]: I1211 10:52:29.382455 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lj24n"
Dec 11 10:52:29 crc kubenswrapper[5016]: I1211 10:52:29.382481 5016 scope.go:117] "RemoveContainer" containerID="505022c117601d0f5c6cecb75056893d75c4840e660673c58b423a871f29dc36"
Dec 11 10:52:29 crc kubenswrapper[5016]: I1211 10:52:29.410874 5016 scope.go:117] "RemoveContainer" containerID="0edbe913f072f64da440b2cd0a93f1a09c50b58068f730fcebd3e9735d281a62"
Dec 11 10:52:29 crc kubenswrapper[5016]: I1211 10:52:29.422735 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lj24n"]
Dec 11 10:52:29 crc kubenswrapper[5016]: I1211 10:52:29.432981 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lj24n"]
Dec 11 10:52:29 crc kubenswrapper[5016]: I1211 10:52:29.449713 5016 scope.go:117] "RemoveContainer" containerID="d0a98fc7285ac0c7aceacbfa6243650e0df12d7523d96d5ffac253bd22540975"
Dec 11 10:52:29 crc kubenswrapper[5016]: I1211 10:52:29.483146 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e936ee06-f638-4024-8026-04201d3f6116" path="/var/lib/kubelet/pods/e936ee06-f638-4024-8026-04201d3f6116/volumes"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.883378 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv"]
Dec 11 10:52:33 crc kubenswrapper[5016]: E1211 10:52:33.884088 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e936ee06-f638-4024-8026-04201d3f6116" containerName="registry-server"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.884100 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e936ee06-f638-4024-8026-04201d3f6116" containerName="registry-server"
Dec 11 10:52:33 crc kubenswrapper[5016]: E1211 10:52:33.884116 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e936ee06-f638-4024-8026-04201d3f6116" containerName="extract-utilities"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.884122 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e936ee06-f638-4024-8026-04201d3f6116" containerName="extract-utilities"
Dec 11 10:52:33 crc kubenswrapper[5016]: E1211 10:52:33.884138 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e936ee06-f638-4024-8026-04201d3f6116" containerName="extract-content"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.884143 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e936ee06-f638-4024-8026-04201d3f6116" containerName="extract-content"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.884246 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="e936ee06-f638-4024-8026-04201d3f6116" containerName="registry-server"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.884826 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.887137 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-nfcqg"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.897686 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv"]
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.899110 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fs4d5\" (UniqueName: \"kubernetes.io/projected/cc7c5322-f255-4c02-b684-d1bccf74eb1a-kube-api-access-fs4d5\") pod \"barbican-operator-controller-manager-7d9dfd778-qtvrv\" (UID: \"cc7c5322-f255-4c02-b684-d1bccf74eb1a\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.921258 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8"]
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.922339 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.931283 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf"]
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.932514 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.935596 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-cn9fl"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.935752 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-pngrv"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.956867 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8"]
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.965135 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf"]
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.992232 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp"]
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.994339 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp"
Dec 11 10:52:33 crc kubenswrapper[5016]: I1211 10:52:33.996408 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-l8sk7"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.000612 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrbwp\" (UniqueName: \"kubernetes.io/projected/de234c3f-f96c-444d-a7f5-a453df14d2e4-kube-api-access-lrbwp\") pod \"cinder-operator-controller-manager-6c677c69b-rzjg8\" (UID: \"de234c3f-f96c-444d-a7f5-a453df14d2e4\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.000660 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7bhq\" (UniqueName: \"kubernetes.io/projected/b1812840-a032-4c7a-a851-505f89b19063-kube-api-access-l7bhq\") pod \"glance-operator-controller-manager-5697bb5779-v8hsp\" (UID: \"b1812840-a032-4c7a-a851-505f89b19063\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.000699 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7kdl\" (UniqueName: \"kubernetes.io/projected/251e6e53-bbba-4d67-a361-44c471db70ff-kube-api-access-n7kdl\") pod \"designate-operator-controller-manager-697fb699cf-4hwxf\" (UID: \"251e6e53-bbba-4d67-a361-44c471db70ff\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.000735 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fs4d5\" (UniqueName: \"kubernetes.io/projected/cc7c5322-f255-4c02-b684-d1bccf74eb1a-kube-api-access-fs4d5\") pod \"barbican-operator-controller-manager-7d9dfd778-qtvrv\" (UID: \"cc7c5322-f255-4c02-b684-d1bccf74eb1a\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.018391 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.019688 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.027000 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.027897 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-xxqcs"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.038582 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fs4d5\" (UniqueName: \"kubernetes.io/projected/cc7c5322-f255-4c02-b684-d1bccf74eb1a-kube-api-access-fs4d5\") pod \"barbican-operator-controller-manager-7d9dfd778-qtvrv\" (UID: \"cc7c5322-f255-4c02-b684-d1bccf74eb1a\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.045853 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.049138 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.052047 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-n62wx"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.070928 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.085533 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.091809 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.098029 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-48q65"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.099241 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-967d97867-48q65"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.099803 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.099803 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-bg9m4"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.102548 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7bhq\" (UniqueName: \"kubernetes.io/projected/b1812840-a032-4c7a-a851-505f89b19063-kube-api-access-l7bhq\") pod \"glance-operator-controller-manager-5697bb5779-v8hsp\" (UID: \"b1812840-a032-4c7a-a851-505f89b19063\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.102593 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7kdl\" (UniqueName: \"kubernetes.io/projected/251e6e53-bbba-4d67-a361-44c471db70ff-kube-api-access-n7kdl\") pod \"designate-operator-controller-manager-697fb699cf-4hwxf\" (UID: \"251e6e53-bbba-4d67-a361-44c471db70ff\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.102679 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrbwp\" (UniqueName: \"kubernetes.io/projected/de234c3f-f96c-444d-a7f5-a453df14d2e4-kube-api-access-lrbwp\") pod \"cinder-operator-controller-manager-6c677c69b-rzjg8\" (UID: \"de234c3f-f96c-444d-a7f5-a453df14d2e4\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.107516 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-bkjrn"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.112707 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.134253 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.141581 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7kdl\" (UniqueName: \"kubernetes.io/projected/251e6e53-bbba-4d67-a361-44c471db70ff-kube-api-access-n7kdl\") pod \"designate-operator-controller-manager-697fb699cf-4hwxf\" (UID: \"251e6e53-bbba-4d67-a361-44c471db70ff\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.156784 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.162408 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-5lhfg"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.173778 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.192539 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-48q65"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.202474 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7bhq\" (UniqueName: \"kubernetes.io/projected/b1812840-a032-4c7a-a851-505f89b19063-kube-api-access-l7bhq\") pod \"glance-operator-controller-manager-5697bb5779-v8hsp\" (UID: \"b1812840-a032-4c7a-a851-505f89b19063\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.203968 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t85z9\" (UniqueName: \"kubernetes.io/projected/b44a8ea9-ba71-486d-9672-44146f09acb1-kube-api-access-t85z9\") pod \"infra-operator-controller-manager-78d48bff9d-9s5rq\" (UID: \"b44a8ea9-ba71-486d-9672-44146f09acb1\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.204146 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvm6q\" (UniqueName: \"kubernetes.io/projected/2f35a405-1590-4bd7-9f64-f897bac8e8e7-kube-api-access-xvm6q\") pod \"heat-operator-controller-manager-5f64f6f8bb-jpkdc\" (UID: \"2f35a405-1590-4bd7-9f64-f897bac8e8e7\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.204308 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82tfg\" (UniqueName: \"kubernetes.io/projected/ac95cdf1-ed70-4d47-8b28-3f7f5e68804b-kube-api-access-82tfg\") pod \"ironic-operator-controller-manager-967d97867-48q65\" (UID: \"ac95cdf1-ed70-4d47-8b28-3f7f5e68804b\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-48q65"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.204412 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d8nd\" (UniqueName: \"kubernetes.io/projected/daa29314-dcea-4026-9a51-7f9ceaed9052-kube-api-access-5d8nd\") pod \"horizon-operator-controller-manager-68c6d99b8f-tcntz\" (UID: \"daa29314-dcea-4026-9a51-7f9ceaed9052\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.204508 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert\") pod \"infra-operator-controller-manager-78d48bff9d-9s5rq\" (UID: \"b44a8ea9-ba71-486d-9672-44146f09acb1\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.210530 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrbwp\" (UniqueName: \"kubernetes.io/projected/de234c3f-f96c-444d-a7f5-a453df14d2e4-kube-api-access-lrbwp\") pod \"cinder-operator-controller-manager-6c677c69b-rzjg8\" (UID: \"de234c3f-f96c-444d-a7f5-a453df14d2e4\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.214388 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.233267 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.244550 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.245324 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.246201 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.255367 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-wflld"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.267624 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.268931 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.273778 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.273890 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.277172 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-s654k"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.281017 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.282433 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.288788 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-szljz"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.300916 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.303226 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.312215 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvm6q\" (UniqueName: \"kubernetes.io/projected/2f35a405-1590-4bd7-9f64-f897bac8e8e7-kube-api-access-xvm6q\") pod \"heat-operator-controller-manager-5f64f6f8bb-jpkdc\" (UID: \"2f35a405-1590-4bd7-9f64-f897bac8e8e7\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.312374 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82tfg\" (UniqueName: \"kubernetes.io/projected/ac95cdf1-ed70-4d47-8b28-3f7f5e68804b-kube-api-access-82tfg\") pod \"ironic-operator-controller-manager-967d97867-48q65\" (UID: \"ac95cdf1-ed70-4d47-8b28-3f7f5e68804b\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-48q65"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.312485 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d8nd\" (UniqueName: \"kubernetes.io/projected/daa29314-dcea-4026-9a51-7f9ceaed9052-kube-api-access-5d8nd\") pod \"horizon-operator-controller-manager-68c6d99b8f-tcntz\" (UID: \"daa29314-dcea-4026-9a51-7f9ceaed9052\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.312566 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6758\" (UniqueName: \"kubernetes.io/projected/b2cd783c-ef38-4478-9f86-60374f554bb2-kube-api-access-f6758\") pod \"keystone-operator-controller-manager-7765d96ddf-lx6dp\" (UID: \"b2cd783c-ef38-4478-9f86-60374f554bb2\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.312645 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert\") pod \"infra-operator-controller-manager-78d48bff9d-9s5rq\" (UID: \"b44a8ea9-ba71-486d-9672-44146f09acb1\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.313801 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.313957 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t85z9\" (UniqueName: \"kubernetes.io/projected/b44a8ea9-ba71-486d-9672-44146f09acb1-kube-api-access-t85z9\") pod \"infra-operator-controller-manager-78d48bff9d-9s5rq\" (UID: \"b44a8ea9-ba71-486d-9672-44146f09acb1\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"
Dec 11 10:52:34 crc kubenswrapper[5016]: E1211 10:52:34.313653 5016 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Dec 11 10:52:34 crc kubenswrapper[5016]: E1211 10:52:34.314331 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert podName:b44a8ea9-ba71-486d-9672-44146f09acb1 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:34.81430917 +0000 UTC m=+1071.632868849 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert") pod "infra-operator-controller-manager-78d48bff9d-9s5rq" (UID: "b44a8ea9-ba71-486d-9672-44146f09acb1") : secret "infra-operator-webhook-server-cert" not found
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.351834 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82tfg\" (UniqueName: \"kubernetes.io/projected/ac95cdf1-ed70-4d47-8b28-3f7f5e68804b-kube-api-access-82tfg\") pod \"ironic-operator-controller-manager-967d97867-48q65\" (UID: \"ac95cdf1-ed70-4d47-8b28-3f7f5e68804b\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-48q65"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.352196 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t85z9\" (UniqueName: \"kubernetes.io/projected/b44a8ea9-ba71-486d-9672-44146f09acb1-kube-api-access-t85z9\") pod \"infra-operator-controller-manager-78d48bff9d-9s5rq\" (UID: \"b44a8ea9-ba71-486d-9672-44146f09acb1\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.352487 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvm6q\" (UniqueName: \"kubernetes.io/projected/2f35a405-1590-4bd7-9f64-f897bac8e8e7-kube-api-access-xvm6q\") pod \"heat-operator-controller-manager-5f64f6f8bb-jpkdc\" (UID: \"2f35a405-1590-4bd7-9f64-f897bac8e8e7\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.358550 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d8nd\" (UniqueName: \"kubernetes.io/projected/daa29314-dcea-4026-9a51-7f9ceaed9052-kube-api-access-5d8nd\") pod \"horizon-operator-controller-manager-68c6d99b8f-tcntz\" (UID: \"daa29314-dcea-4026-9a51-7f9ceaed9052\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.367996 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f"]
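The two E-level entries above are a startup race, not a crash: the infra-operator webhook Secret does not exist yet when the kubelet first tries to mount the "cert" volume, so MountVolume.SetUp fails and nestedpendingoperations schedules the next attempt 500ms out ("No retries permitted until ... durationBeforeRetry 500ms"). A sketch of the doubling backoff this entry suggests is below; the 500ms start is taken from the log, but the doubling factor and the cap are assumptions for illustration, not values read from this log or from kubelet source.

// mount_backoff.go - a sketch of the retry schedule implied by the
// "durationBeforeRetry 500ms" entry above: each failed MountVolume.SetUp
// pushes the next attempt further out, up to an assumed ceiling.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond // observed initial delay in the log
	maxDelay := 2 * time.Minute     // assumed ceiling for illustration
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: retry no sooner than %v after failure\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}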
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.369887 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.371441 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.372578 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-lz9sv"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.394021 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-vcscv"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.395560 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vcscv"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.399704 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-zn4zd"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.410478 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.415159 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msbfq\" (UniqueName: \"kubernetes.io/projected/26861a3b-3eb1-4c65-8c69-2d43a2aab77c-kube-api-access-msbfq\") pod \"mariadb-operator-controller-manager-79c8c4686c-dfr98\" (UID: \"26861a3b-3eb1-4c65-8c69-2d43a2aab77c\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.415229 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6758\" (UniqueName: \"kubernetes.io/projected/b2cd783c-ef38-4478-9f86-60374f554bb2-kube-api-access-f6758\") pod \"keystone-operator-controller-manager-7765d96ddf-lx6dp\" (UID: \"b2cd783c-ef38-4478-9f86-60374f554bb2\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.415254 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kczql\" (UniqueName: \"kubernetes.io/projected/4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19-kube-api-access-kczql\") pod \"octavia-operator-controller-manager-998648c74-vcscv\" (UID: \"4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-vcscv"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.415307 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qptw9\" (UniqueName: \"kubernetes.io/projected/95b9a24e-2b04-4161-aee4-2b7a73330a4e-kube-api-access-qptw9\") pod \"manila-operator-controller-manager-5b5fd79c9c-46n5n\" (UID: \"95b9a24e-2b04-4161-aee4-2b7a73330a4e\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.415345 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fq6k\" (UniqueName: \"kubernetes.io/projected/cdf76c07-0127-402e-90d7-9c868594b4d7-kube-api-access-7fq6k\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-5qrxg\" (UID: \"cdf76c07-0127-402e-90d7-9c868594b4d7\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.415367 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fh22\" (UniqueName: \"kubernetes.io/projected/e4f0f2a5-a15b-45b8-96ea-91e37ea98237-kube-api-access-9fh22\") pod \"nova-operator-controller-manager-697bc559fc-vbw9f\" (UID: \"e4f0f2a5-a15b-45b8-96ea-91e37ea98237\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.418237 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.429988 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-vcscv"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.444187 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6758\" (UniqueName: \"kubernetes.io/projected/b2cd783c-ef38-4478-9f86-60374f554bb2-kube-api-access-f6758\") pod \"keystone-operator-controller-manager-7765d96ddf-lx6dp\" (UID: \"b2cd783c-ef38-4478-9f86-60374f554bb2\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.520596 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kczql\" (UniqueName: \"kubernetes.io/projected/4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19-kube-api-access-kczql\") pod \"octavia-operator-controller-manager-998648c74-vcscv\" (UID: \"4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-vcscv"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.520650 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qptw9\" (UniqueName: \"kubernetes.io/projected/95b9a24e-2b04-4161-aee4-2b7a73330a4e-kube-api-access-qptw9\") pod \"manila-operator-controller-manager-5b5fd79c9c-46n5n\" (UID: \"95b9a24e-2b04-4161-aee4-2b7a73330a4e\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.520694 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fq6k\" (UniqueName: \"kubernetes.io/projected/cdf76c07-0127-402e-90d7-9c868594b4d7-kube-api-access-7fq6k\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-5qrxg\" (UID: \"cdf76c07-0127-402e-90d7-9c868594b4d7\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.520722 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fh22\" (UniqueName: \"kubernetes.io/projected/e4f0f2a5-a15b-45b8-96ea-91e37ea98237-kube-api-access-9fh22\") pod \"nova-operator-controller-manager-697bc559fc-vbw9f\" (UID: \"e4f0f2a5-a15b-45b8-96ea-91e37ea98237\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.520765 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msbfq\" (UniqueName: \"kubernetes.io/projected/26861a3b-3eb1-4c65-8c69-2d43a2aab77c-kube-api-access-msbfq\") pod \"mariadb-operator-controller-manager-79c8c4686c-dfr98\" (UID: \"26861a3b-3eb1-4c65-8c69-2d43a2aab77c\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.545307 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-967d97867-48q65"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.571454 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fq6k\" (UniqueName: \"kubernetes.io/projected/cdf76c07-0127-402e-90d7-9c868594b4d7-kube-api-access-7fq6k\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-5qrxg\" (UID: \"cdf76c07-0127-402e-90d7-9c868594b4d7\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.571783 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kczql\" (UniqueName: \"kubernetes.io/projected/4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19-kube-api-access-kczql\") pod \"octavia-operator-controller-manager-998648c74-vcscv\" (UID: \"4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-vcscv"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.575888 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fh22\" (UniqueName: \"kubernetes.io/projected/e4f0f2a5-a15b-45b8-96ea-91e37ea98237-kube-api-access-9fh22\") pod \"nova-operator-controller-manager-697bc559fc-vbw9f\" (UID: \"e4f0f2a5-a15b-45b8-96ea-91e37ea98237\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.584418 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msbfq\" (UniqueName: \"kubernetes.io/projected/26861a3b-3eb1-4c65-8c69-2d43a2aab77c-kube-api-access-msbfq\") pod \"mariadb-operator-controller-manager-79c8c4686c-dfr98\" (UID: \"26861a3b-3eb1-4c65-8c69-2d43a2aab77c\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.589337 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qptw9\" (UniqueName: \"kubernetes.io/projected/95b9a24e-2b04-4161-aee4-2b7a73330a4e-kube-api-access-qptw9\") pod \"manila-operator-controller-manager-5b5fd79c9c-46n5n\" (UID: \"95b9a24e-2b04-4161-aee4-2b7a73330a4e\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.606147 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.610772 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.612296 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.615915 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-6hcks"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.616304 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.629440 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.638778 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.680134 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.699088 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5"]
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.700853 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.703612 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-ssdb2"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.704757 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.725293 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fbtnxw\" (UID: \"dfea8003-afd2-45aa-bd7b-dcc5460e8a80\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.728506 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9xbj\" (UniqueName: \"kubernetes.io/projected/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-kube-api-access-g9xbj\") pod \"openstack-baremetal-operator-controller-manager-84b575879fbtnxw\" (UID: \"dfea8003-afd2-45aa-bd7b-dcc5460e8a80\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw"
Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.738147 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.757065 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5"] Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.785756 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-kwflq"] Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.787060 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-kwflq" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.788239 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vcscv" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.794582 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-ns7mt" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.819289 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-kwflq"] Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.829310 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fbtnxw\" (UID: \"dfea8003-afd2-45aa-bd7b-dcc5460e8a80\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.829362 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnvgr\" (UniqueName: \"kubernetes.io/projected/1099bcae-fea4-4864-8434-98ed888307e5-kube-api-access-pnvgr\") pod \"placement-operator-controller-manager-78f8948974-kwflq\" (UID: \"1099bcae-fea4-4864-8434-98ed888307e5\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-kwflq" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.829396 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert\") pod \"infra-operator-controller-manager-78d48bff9d-9s5rq\" (UID: \"b44a8ea9-ba71-486d-9672-44146f09acb1\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.829427 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9xbj\" (UniqueName: \"kubernetes.io/projected/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-kube-api-access-g9xbj\") pod \"openstack-baremetal-operator-controller-manager-84b575879fbtnxw\" (UID: \"dfea8003-afd2-45aa-bd7b-dcc5460e8a80\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.829446 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdzvc\" (UniqueName: \"kubernetes.io/projected/f07a07a1-b235-4d36-a666-2b1be3363f34-kube-api-access-jdzvc\") pod \"ovn-operator-controller-manager-b6456fdb6-wf9q5\" (UID: 
\"f07a07a1-b235-4d36-a666-2b1be3363f34\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5" Dec 11 10:52:34 crc kubenswrapper[5016]: E1211 10:52:34.829519 5016 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 11 10:52:34 crc kubenswrapper[5016]: E1211 10:52:34.829597 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert podName:dfea8003-afd2-45aa-bd7b-dcc5460e8a80 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:35.329574606 +0000 UTC m=+1072.148134185 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fbtnxw" (UID: "dfea8003-afd2-45aa-bd7b-dcc5460e8a80") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 11 10:52:34 crc kubenswrapper[5016]: E1211 10:52:34.829610 5016 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 11 10:52:34 crc kubenswrapper[5016]: E1211 10:52:34.829652 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert podName:b44a8ea9-ba71-486d-9672-44146f09acb1 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:35.829638197 +0000 UTC m=+1072.648197776 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert") pod "infra-operator-controller-manager-78d48bff9d-9s5rq" (UID: "b44a8ea9-ba71-486d-9672-44146f09acb1") : secret "infra-operator-webhook-server-cert" not found Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.836400 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4"] Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.837861 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.843627 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-449x6" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.849883 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9xbj\" (UniqueName: \"kubernetes.io/projected/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-kube-api-access-g9xbj\") pod \"openstack-baremetal-operator-controller-manager-84b575879fbtnxw\" (UID: \"dfea8003-afd2-45aa-bd7b-dcc5460e8a80\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.850021 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4"] Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.879659 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx"] Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.882443 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.888211 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-qxvxw" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.915047 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-wphqn"] Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.921674 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.930569 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnvgr\" (UniqueName: \"kubernetes.io/projected/1099bcae-fea4-4864-8434-98ed888307e5-kube-api-access-pnvgr\") pod \"placement-operator-controller-manager-78f8948974-kwflq\" (UID: \"1099bcae-fea4-4864-8434-98ed888307e5\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-kwflq" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.930674 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdzvc\" (UniqueName: \"kubernetes.io/projected/f07a07a1-b235-4d36-a666-2b1be3363f34-kube-api-access-jdzvc\") pod \"ovn-operator-controller-manager-b6456fdb6-wf9q5\" (UID: \"f07a07a1-b235-4d36-a666-2b1be3363f34\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.930581 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-shdjd" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.950762 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnvgr\" (UniqueName: \"kubernetes.io/projected/1099bcae-fea4-4864-8434-98ed888307e5-kube-api-access-pnvgr\") pod \"placement-operator-controller-manager-78f8948974-kwflq\" (UID: \"1099bcae-fea4-4864-8434-98ed888307e5\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-kwflq" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.955390 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx"] Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.963742 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdzvc\" (UniqueName: \"kubernetes.io/projected/f07a07a1-b235-4d36-a666-2b1be3363f34-kube-api-access-jdzvc\") pod \"ovn-operator-controller-manager-b6456fdb6-wf9q5\" (UID: \"f07a07a1-b235-4d36-a666-2b1be3363f34\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5" Dec 11 10:52:34 crc kubenswrapper[5016]: I1211 10:52:34.998201 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-wphqn"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.009382 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.012399 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.028244 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.030434 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-jgwhz" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.031422 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqsmr\" (UniqueName: \"kubernetes.io/projected/5e9876fa-8ec4-432b-b582-6ee210b828b5-kube-api-access-nqsmr\") pod \"telemetry-operator-controller-manager-58d5ff84df-6jmdx\" (UID: \"5e9876fa-8ec4-432b-b582-6ee210b828b5\") " pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.032522 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q24xx\" (UniqueName: \"kubernetes.io/projected/1d57e8d7-5c81-4a4d-97a9-af4795392e5a-kube-api-access-q24xx\") pod \"swift-operator-controller-manager-9d58d64bc-5z7c4\" (UID: \"1d57e8d7-5c81-4a4d-97a9-af4795392e5a\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.032697 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv6b9\" (UniqueName: \"kubernetes.io/projected/542f9a19-fab3-426b-bb8a-e12a45e4e422-kube-api-access-jv6b9\") pod \"test-operator-controller-manager-5854674fcc-wphqn\" (UID: \"542f9a19-fab3-426b-bb8a-e12a45e4e422\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.031608 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.034424 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.037849 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.045331 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.053335 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.073332 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.073434 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.046396 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-fg9st" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.057686 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.059259 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.087442 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-5fr7c" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.110980 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-kwflq" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.138504 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.138558 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q24xx\" (UniqueName: \"kubernetes.io/projected/1d57e8d7-5c81-4a4d-97a9-af4795392e5a-kube-api-access-q24xx\") pod \"swift-operator-controller-manager-9d58d64bc-5z7c4\" (UID: \"1d57e8d7-5c81-4a4d-97a9-af4795392e5a\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.138588 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rlqr\" (UniqueName: \"kubernetes.io/projected/5adcacd2-730a-4cb7-9944-239289405003-kube-api-access-8rlqr\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.138620 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95csl\" (UniqueName: \"kubernetes.io/projected/e5d7cce6-369e-4837-8e40-385de0d684f7-kube-api-access-95csl\") pod \"rabbitmq-cluster-operator-manager-668c99d594-rhkn6\" (UID: \"e5d7cce6-369e-4837-8e40-385de0d684f7\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.138654 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv6b9\" (UniqueName: \"kubernetes.io/projected/542f9a19-fab3-426b-bb8a-e12a45e4e422-kube-api-access-jv6b9\") pod \"test-operator-controller-manager-5854674fcc-wphqn\" (UID: \"542f9a19-fab3-426b-bb8a-e12a45e4e422\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.138670 5016 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.138686 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4b8h\" (UniqueName: \"kubernetes.io/projected/82d121a5-e733-445f-be6e-bc96e3c162e2-kube-api-access-s4b8h\") pod \"watcher-operator-controller-manager-75944c9b7-92pjv\" (UID: \"82d121a5-e733-445f-be6e-bc96e3c162e2\") " pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.138709 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqsmr\" (UniqueName: \"kubernetes.io/projected/5e9876fa-8ec4-432b-b582-6ee210b828b5-kube-api-access-nqsmr\") pod \"telemetry-operator-controller-manager-58d5ff84df-6jmdx\" (UID: \"5e9876fa-8ec4-432b-b582-6ee210b828b5\") " pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.145447 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.184319 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqsmr\" (UniqueName: \"kubernetes.io/projected/5e9876fa-8ec4-432b-b582-6ee210b828b5-kube-api-access-nqsmr\") pod \"telemetry-operator-controller-manager-58d5ff84df-6jmdx\" (UID: \"5e9876fa-8ec4-432b-b582-6ee210b828b5\") " pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.192676 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv6b9\" (UniqueName: \"kubernetes.io/projected/542f9a19-fab3-426b-bb8a-e12a45e4e422-kube-api-access-jv6b9\") pod \"test-operator-controller-manager-5854674fcc-wphqn\" (UID: \"542f9a19-fab3-426b-bb8a-e12a45e4e422\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.193330 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q24xx\" (UniqueName: \"kubernetes.io/projected/1d57e8d7-5c81-4a4d-97a9-af4795392e5a-kube-api-access-q24xx\") pod \"swift-operator-controller-manager-9d58d64bc-5z7c4\" (UID: \"1d57e8d7-5c81-4a4d-97a9-af4795392e5a\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.212026 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.239606 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" Dec 11 10:52:35 crc 
kubenswrapper[5016]: I1211 10:52:35.239648 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4b8h\" (UniqueName: \"kubernetes.io/projected/82d121a5-e733-445f-be6e-bc96e3c162e2-kube-api-access-s4b8h\") pod \"watcher-operator-controller-manager-75944c9b7-92pjv\" (UID: \"82d121a5-e733-445f-be6e-bc96e3c162e2\") " pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.239704 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.239740 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rlqr\" (UniqueName: \"kubernetes.io/projected/5adcacd2-730a-4cb7-9944-239289405003-kube-api-access-8rlqr\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.239777 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95csl\" (UniqueName: \"kubernetes.io/projected/e5d7cce6-369e-4837-8e40-385de0d684f7-kube-api-access-95csl\") pod \"rabbitmq-cluster-operator-manager-668c99d594-rhkn6\" (UID: \"e5d7cce6-369e-4837-8e40-385de0d684f7\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6" Dec 11 10:52:35 crc kubenswrapper[5016]: E1211 10:52:35.240527 5016 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 11 10:52:35 crc kubenswrapper[5016]: E1211 10:52:35.240566 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs podName:5adcacd2-730a-4cb7-9944-239289405003 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:35.740553156 +0000 UTC m=+1072.559112735 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs") pod "openstack-operator-controller-manager-595db99498-vmll2" (UID: "5adcacd2-730a-4cb7-9944-239289405003") : secret "webhook-server-cert" not found Dec 11 10:52:35 crc kubenswrapper[5016]: E1211 10:52:35.240854 5016 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 11 10:52:35 crc kubenswrapper[5016]: E1211 10:52:35.240877 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs podName:5adcacd2-730a-4cb7-9944-239289405003 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:35.740870143 +0000 UTC m=+1072.559429722 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs") pod "openstack-operator-controller-manager-595db99498-vmll2" (UID: "5adcacd2-730a-4cb7-9944-239289405003") : secret "metrics-server-cert" not found Dec 11 10:52:35 crc kubenswrapper[5016]: W1211 10:52:35.250072 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde234c3f_f96c_444d_a7f5_a453df14d2e4.slice/crio-0aef468979ff4367db22bc2acbcf0f46a6b3ab4c2207a027ffc56b13ec706f0a WatchSource:0}: Error finding container 0aef468979ff4367db22bc2acbcf0f46a6b3ab4c2207a027ffc56b13ec706f0a: Status 404 returned error can't find the container with id 0aef468979ff4367db22bc2acbcf0f46a6b3ab4c2207a027ffc56b13ec706f0a Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.252242 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.267864 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.308983 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4b8h\" (UniqueName: \"kubernetes.io/projected/82d121a5-e733-445f-be6e-bc96e3c162e2-kube-api-access-s4b8h\") pod \"watcher-operator-controller-manager-75944c9b7-92pjv\" (UID: \"82d121a5-e733-445f-be6e-bc96e3c162e2\") " pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.309767 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95csl\" (UniqueName: \"kubernetes.io/projected/e5d7cce6-369e-4837-8e40-385de0d684f7-kube-api-access-95csl\") pod \"rabbitmq-cluster-operator-manager-668c99d594-rhkn6\" (UID: \"e5d7cce6-369e-4837-8e40-385de0d684f7\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.310457 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rlqr\" (UniqueName: \"kubernetes.io/projected/5adcacd2-730a-4cb7-9944-239289405003-kube-api-access-8rlqr\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.320446 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.344677 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.351727 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fbtnxw\" (UID: \"dfea8003-afd2-45aa-bd7b-dcc5460e8a80\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" Dec 11 10:52:35 crc kubenswrapper[5016]: E1211 10:52:35.352485 5016 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 11 10:52:35 crc kubenswrapper[5016]: E1211 10:52:35.352528 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert podName:dfea8003-afd2-45aa-bd7b-dcc5460e8a80 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:36.3525145 +0000 UTC m=+1073.171074079 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fbtnxw" (UID: "dfea8003-afd2-45aa-bd7b-dcc5460e8a80") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.361400 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.409530 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.415145 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc"] Dec 11 10:52:35 crc kubenswrapper[5016]: W1211 10:52:35.468675 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2f35a405_1590_4bd7_9f64_f897bac8e8e7.slice/crio-3070d22c4c550665cbbbba087850b53508a2d038612b0cd62f49213a27602e6c WatchSource:0}: Error finding container 3070d22c4c550665cbbbba087850b53508a2d038612b0cd62f49213a27602e6c: Status 404 returned error can't find the container with id 3070d22c4c550665cbbbba087850b53508a2d038612b0cd62f49213a27602e6c Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.473320 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.514435 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp" event={"ID":"b1812840-a032-4c7a-a851-505f89b19063","Type":"ContainerStarted","Data":"b720383994c30b8a88bfa9522e6ffdb084b39f5784eaeb65b78f824d1aa3491c"} Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.519149 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-48q65"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.521981 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8" event={"ID":"de234c3f-f96c-444d-a7f5-a453df14d2e4","Type":"ContainerStarted","Data":"0aef468979ff4367db22bc2acbcf0f46a6b3ab4c2207a027ffc56b13ec706f0a"} Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.532785 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf" event={"ID":"251e6e53-bbba-4d67-a361-44c471db70ff","Type":"ContainerStarted","Data":"2f1629000c00a5b7ec805bf2ba34281760210e225406fdce1b2a41c178178ede"} Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.533645 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv" event={"ID":"cc7c5322-f255-4c02-b684-d1bccf74eb1a","Type":"ContainerStarted","Data":"14b262549416e51655613b777277ac0fdbce2bc0c172884aad990daf5c6589e6"} Dec 11 10:52:35 crc kubenswrapper[5016]: W1211 10:52:35.549627 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddaa29314_dcea_4026_9a51_7f9ceaed9052.slice/crio-eda728ea0c85fb83a0f2fa803de455ca3d3222421e8508d38a51d9f652294878 WatchSource:0}: Error finding container eda728ea0c85fb83a0f2fa803de455ca3d3222421e8508d38a51d9f652294878: Status 404 returned error can't find the container with id eda728ea0c85fb83a0f2fa803de455ca3d3222421e8508d38a51d9f652294878 Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.555462 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" Dec 11 10:52:35 crc kubenswrapper[5016]: W1211 10:52:35.558332 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac95cdf1_ed70_4d47_8b28_3f7f5e68804b.slice/crio-020c598c5ee9691930624c5e126bdf2c038fbf3c1814b59c2ef9a1e130afe6e4 WatchSource:0}: Error finding container 020c598c5ee9691930624c5e126bdf2c038fbf3c1814b59c2ef9a1e130afe6e4: Status 404 returned error can't find the container with id 020c598c5ee9691930624c5e126bdf2c038fbf3c1814b59c2ef9a1e130afe6e4 Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.585812 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.762102 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.762527 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" Dec 11 10:52:35 crc kubenswrapper[5016]: E1211 10:52:35.762327 5016 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 11 10:52:35 crc kubenswrapper[5016]: E1211 10:52:35.762623 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs podName:5adcacd2-730a-4cb7-9944-239289405003 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:36.762598959 +0000 UTC m=+1073.581158538 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs") pod "openstack-operator-controller-manager-595db99498-vmll2" (UID: "5adcacd2-730a-4cb7-9944-239289405003") : secret "webhook-server-cert" not found Dec 11 10:52:35 crc kubenswrapper[5016]: E1211 10:52:35.762688 5016 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 11 10:52:35 crc kubenswrapper[5016]: E1211 10:52:35.762742 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs podName:5adcacd2-730a-4cb7-9944-239289405003 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:36.762728202 +0000 UTC m=+1073.581287781 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs") pod "openstack-operator-controller-manager-595db99498-vmll2" (UID: "5adcacd2-730a-4cb7-9944-239289405003") : secret "metrics-server-cert" not found Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.829280 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.864785 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert\") pod \"infra-operator-controller-manager-78d48bff9d-9s5rq\" (UID: \"b44a8ea9-ba71-486d-9672-44146f09acb1\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq" Dec 11 10:52:35 crc kubenswrapper[5016]: E1211 10:52:35.865089 5016 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 11 10:52:35 crc kubenswrapper[5016]: E1211 10:52:35.865221 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert podName:b44a8ea9-ba71-486d-9672-44146f09acb1 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:37.865189542 +0000 UTC m=+1074.683749121 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert") pod "infra-operator-controller-manager-78d48bff9d-9s5rq" (UID: "b44a8ea9-ba71-486d-9672-44146f09acb1") : secret "infra-operator-webhook-server-cert" not found Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.931878 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-vcscv"] Dec 11 10:52:35 crc kubenswrapper[5016]: I1211 10:52:35.976274 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98"] Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.143014 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n"] Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.177390 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f"] Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.184748 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-kwflq"] Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.189921 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-wphqn"] Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.221593 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jv6b9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-wphqn_openstack-operators(542f9a19-fab3-426b-bb8a-e12a45e4e422): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.223646 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jv6b9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-wphqn_openstack-operators(542f9a19-fab3-426b-bb8a-e12a45e4e422): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.224861 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" podUID="542f9a19-fab3-426b-bb8a-e12a45e4e422" Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.236853 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx"] Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.244553 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5"] Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.271237 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:f27e732ec1faee765461bf137d9be81278b2fa39675019a73622755e1e610b6f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nqsmr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-58d5ff84df-6jmdx_openstack-operators(5e9876fa-8ec4-432b-b582-6ee210b828b5): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.271624 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6"]
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.274920 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nqsmr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-58d5ff84df-6jmdx_openstack-operators(5e9876fa-8ec4-432b-b582-6ee210b828b5): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.275281 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q24xx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-9d58d64bc-5z7c4_openstack-operators(1d57e8d7-5c81-4a4d-97a9-af4795392e5a): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.276347 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" podUID="5e9876fa-8ec4-432b-b582-6ee210b828b5"
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.277151 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q24xx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-9d58d64bc-5z7c4_openstack-operators(1d57e8d7-5c81-4a4d-97a9-af4795392e5a): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.278343 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4"]
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.278438 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" podUID="1d57e8d7-5c81-4a4d-97a9-af4795392e5a"
Dec 11 10:52:36 crc kubenswrapper[5016]: W1211 10:52:36.286429 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5d7cce6_369e_4837_8e40_385de0d684f7.slice/crio-03165d99884a7c2ac97bc3899823f1f92d75aeca2b7813a7edc941c6ff7da012 WatchSource:0}: Error finding container 03165d99884a7c2ac97bc3899823f1f92d75aeca2b7813a7edc941c6ff7da012: Status 404 returned error can't find the container with id 03165d99884a7c2ac97bc3899823f1f92d75aeca2b7813a7edc941c6ff7da012
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.289493 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-95csl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-rhkn6_openstack-operators(e5d7cce6-369e-4837-8e40-385de0d684f7): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.290798 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6" podUID="e5d7cce6-369e-4837-8e40-385de0d684f7"
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.359827 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv"]
Dec 11 10:52:36 crc kubenswrapper[5016]: W1211 10:52:36.378860 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod82d121a5_e733_445f_be6e_bc96e3c162e2.slice/crio-39a801beff9b3c1db0fb5ed63630a2b939a68271c0a20cfd008b49b7b506492b WatchSource:0}: Error finding container 39a801beff9b3c1db0fb5ed63630a2b939a68271c0a20cfd008b49b7b506492b: Status 404 returned error can't find the container with id 39a801beff9b3c1db0fb5ed63630a2b939a68271c0a20cfd008b49b7b506492b
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.382672 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:961417d59f527d925ac48ff6a11de747d0493315e496e34dc83d76a1a1fff58a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s4b8h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-75944c9b7-92pjv_openstack-operators(82d121a5-e733-445f-be6e-bc96e3c162e2): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.384449 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fbtnxw\" (UID: \"dfea8003-afd2-45aa-bd7b-dcc5460e8a80\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw"
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.384640 5016 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.384708 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert podName:dfea8003-afd2-45aa-bd7b-dcc5460e8a80 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:38.384687144 +0000 UTC m=+1075.203246723 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fbtnxw" (UID: "dfea8003-afd2-45aa-bd7b-dcc5460e8a80") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.385809 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s4b8h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-75944c9b7-92pjv_openstack-operators(82d121a5-e733-445f-be6e-bc96e3c162e2): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.386929 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" podUID="82d121a5-e733-445f-be6e-bc96e3c162e2"
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.541181 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" event={"ID":"82d121a5-e733-445f-be6e-bc96e3c162e2","Type":"ContainerStarted","Data":"39a801beff9b3c1db0fb5ed63630a2b939a68271c0a20cfd008b49b7b506492b"}
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.543285 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc" event={"ID":"2f35a405-1590-4bd7-9f64-f897bac8e8e7","Type":"ContainerStarted","Data":"3070d22c4c550665cbbbba087850b53508a2d038612b0cd62f49213a27602e6c"}
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.543710 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:961417d59f527d925ac48ff6a11de747d0493315e496e34dc83d76a1a1fff58a\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" podUID="82d121a5-e733-445f-be6e-bc96e3c162e2"
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.544648 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n" event={"ID":"95b9a24e-2b04-4161-aee4-2b7a73330a4e","Type":"ContainerStarted","Data":"b3b7a2604c5549328454f546081f98892f0c8e8409af464c946f0aaefa4238a1"}
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.545826 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg" event={"ID":"cdf76c07-0127-402e-90d7-9c868594b4d7","Type":"ContainerStarted","Data":"750f18d5f6e7f61c9e958c13173e8cac9de4e9d5cf9a6783cbc36335176f1c16"}
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.547090 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-48q65" event={"ID":"ac95cdf1-ed70-4d47-8b28-3f7f5e68804b","Type":"ContainerStarted","Data":"020c598c5ee9691930624c5e126bdf2c038fbf3c1814b59c2ef9a1e130afe6e4"}
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.548135 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" event={"ID":"542f9a19-fab3-426b-bb8a-e12a45e4e422","Type":"ContainerStarted","Data":"f8ffa92f7c6d8c1ff2b4798e2b34a93604d4c0e331b66ce084eaddcf6fc11052"}
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.549081 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" event={"ID":"1d57e8d7-5c81-4a4d-97a9-af4795392e5a","Type":"ContainerStarted","Data":"473bc88383b081a9ec9161e83524b2189ed8b3dff2ebb3ff14bbf5755d57226d"}
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.550143 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" podUID="542f9a19-fab3-426b-bb8a-e12a45e4e422"
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.550168 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6" event={"ID":"e5d7cce6-369e-4837-8e40-385de0d684f7","Type":"ContainerStarted","Data":"03165d99884a7c2ac97bc3899823f1f92d75aeca2b7813a7edc941c6ff7da012"}
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.551280 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" podUID="1d57e8d7-5c81-4a4d-97a9-af4795392e5a"
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.551564 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-kwflq" event={"ID":"1099bcae-fea4-4864-8434-98ed888307e5","Type":"ContainerStarted","Data":"661cf177a9b03527ad8e5ff66fffdcffec457930eea2853b7edf8645f931cee4"}
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.551917 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6" podUID="e5d7cce6-369e-4837-8e40-385de0d684f7"
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.552986 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vcscv" event={"ID":"4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19","Type":"ContainerStarted","Data":"769b0966ac8af4fa435abdacd330cb24c51f7284c0dc76263057568f3553bdcc"}
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.554083 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5" event={"ID":"f07a07a1-b235-4d36-a666-2b1be3363f34","Type":"ContainerStarted","Data":"a75ff38bb56593546785c199594341baa3e3a87c23e043050766db74dfd7e0ba"}
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.555028 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz" event={"ID":"daa29314-dcea-4026-9a51-7f9ceaed9052","Type":"ContainerStarted","Data":"eda728ea0c85fb83a0f2fa803de455ca3d3222421e8508d38a51d9f652294878"}
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.556388 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f" event={"ID":"e4f0f2a5-a15b-45b8-96ea-91e37ea98237","Type":"ContainerStarted","Data":"b781f5d57a08100ba00bdb1322581b2d724e13e6955f255d54b064ac5f7abd2f"}
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.557858 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp" event={"ID":"b2cd783c-ef38-4478-9f86-60374f554bb2","Type":"ContainerStarted","Data":"876de348d8450dfa4757a786c6803d5c7239dca22c5b960778fe1cfc994aa64f"}
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.560717 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98" event={"ID":"26861a3b-3eb1-4c65-8c69-2d43a2aab77c","Type":"ContainerStarted","Data":"193c66c75bc1b1ffd92337a3ddecdc3d7c911af640cd38522dc2f637c24859c4"}
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.562176 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" event={"ID":"5e9876fa-8ec4-432b-b582-6ee210b828b5","Type":"ContainerStarted","Data":"485b239ed7dc9f6b8f15afd87df4a744d38bc6698e87284375e63a2ac82095c0"}
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.566130 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:f27e732ec1faee765461bf137d9be81278b2fa39675019a73622755e1e610b6f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" podUID="5e9876fa-8ec4-432b-b582-6ee210b828b5"
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.792035 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"
Dec 11 10:52:36 crc kubenswrapper[5016]: I1211 10:52:36.792235 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.792284 5016 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.792635 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs podName:5adcacd2-730a-4cb7-9944-239289405003 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:38.7926004 +0000 UTC m=+1075.611160119 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs") pod "openstack-operator-controller-manager-595db99498-vmll2" (UID: "5adcacd2-730a-4cb7-9944-239289405003") : secret "webhook-server-cert" not found
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.792662 5016 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Dec 11 10:52:36 crc kubenswrapper[5016]: E1211 10:52:36.792758 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs podName:5adcacd2-730a-4cb7-9944-239289405003 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:38.792735363 +0000 UTC m=+1075.611294942 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs") pod "openstack-operator-controller-manager-595db99498-vmll2" (UID: "5adcacd2-730a-4cb7-9944-239289405003") : secret "metrics-server-cert" not found
Dec 11 10:52:37 crc kubenswrapper[5016]: E1211 10:52:37.591202 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6" podUID="e5d7cce6-369e-4837-8e40-385de0d684f7"
Dec 11 10:52:37 crc kubenswrapper[5016]: E1211 10:52:37.596274 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:f27e732ec1faee765461bf137d9be81278b2fa39675019a73622755e1e610b6f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" podUID="5e9876fa-8ec4-432b-b582-6ee210b828b5"
Dec 11 10:52:37 crc kubenswrapper[5016]: E1211 10:52:37.596395 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" podUID="1d57e8d7-5c81-4a4d-97a9-af4795392e5a"
Dec 11 10:52:37 crc kubenswrapper[5016]: E1211 10:52:37.596454 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" podUID="542f9a19-fab3-426b-bb8a-e12a45e4e422"
Dec 11 10:52:37 crc kubenswrapper[5016]: E1211 10:52:37.596509 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:961417d59f527d925ac48ff6a11de747d0493315e496e34dc83d76a1a1fff58a\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" podUID="82d121a5-e733-445f-be6e-bc96e3c162e2"
Dec 11 10:52:37 crc kubenswrapper[5016]: I1211 10:52:37.922840 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert\") pod \"infra-operator-controller-manager-78d48bff9d-9s5rq\" (UID: \"b44a8ea9-ba71-486d-9672-44146f09acb1\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"
Dec 11 10:52:37 crc kubenswrapper[5016]: E1211 10:52:37.924639 5016 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Dec 11 10:52:37 crc kubenswrapper[5016]: E1211 10:52:37.924766 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert podName:b44a8ea9-ba71-486d-9672-44146f09acb1 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:41.924725131 +0000 UTC m=+1078.743284710 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert") pod "infra-operator-controller-manager-78d48bff9d-9s5rq" (UID: "b44a8ea9-ba71-486d-9672-44146f09acb1") : secret "infra-operator-webhook-server-cert" not found
Dec 11 10:52:38 crc kubenswrapper[5016]: I1211 10:52:38.438392 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fbtnxw\" (UID: \"dfea8003-afd2-45aa-bd7b-dcc5460e8a80\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw"
Dec 11 10:52:38 crc kubenswrapper[5016]: E1211 10:52:38.438537 5016 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Dec 11 10:52:38 crc kubenswrapper[5016]: E1211 10:52:38.438581 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert podName:dfea8003-afd2-45aa-bd7b-dcc5460e8a80 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:42.438568882 +0000 UTC m=+1079.257128451 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fbtnxw" (UID: "dfea8003-afd2-45aa-bd7b-dcc5460e8a80") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Dec 11 10:52:38 crc kubenswrapper[5016]: I1211 10:52:38.849857 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"
Dec 11 10:52:38 crc kubenswrapper[5016]: I1211 10:52:38.850219 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"
Dec 11 10:52:38 crc kubenswrapper[5016]: E1211 10:52:38.850617 5016 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Dec 11 10:52:38 crc kubenswrapper[5016]: E1211 10:52:38.850682 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs podName:5adcacd2-730a-4cb7-9944-239289405003 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:42.850663419 +0000 UTC m=+1079.669222998 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs") pod "openstack-operator-controller-manager-595db99498-vmll2" (UID: "5adcacd2-730a-4cb7-9944-239289405003") : secret "webhook-server-cert" not found
Dec 11 10:52:38 crc kubenswrapper[5016]: E1211 10:52:38.851372 5016 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Dec 11 10:52:38 crc kubenswrapper[5016]: E1211 10:52:38.851585 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs podName:5adcacd2-730a-4cb7-9944-239289405003 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:42.851522611 +0000 UTC m=+1079.670082190 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs") pod "openstack-operator-controller-manager-595db99498-vmll2" (UID: "5adcacd2-730a-4cb7-9944-239289405003") : secret "metrics-server-cert" not found
Dec 11 10:52:41 crc kubenswrapper[5016]: I1211 10:52:41.998226 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert\") pod \"infra-operator-controller-manager-78d48bff9d-9s5rq\" (UID: \"b44a8ea9-ba71-486d-9672-44146f09acb1\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"
Dec 11 10:52:41 crc kubenswrapper[5016]: E1211 10:52:41.999281 5016 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Dec 11 10:52:41 crc kubenswrapper[5016]: E1211 10:52:41.999378 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert podName:b44a8ea9-ba71-486d-9672-44146f09acb1 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:49.999353971 +0000 UTC m=+1086.817913550 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert") pod "infra-operator-controller-manager-78d48bff9d-9s5rq" (UID: "b44a8ea9-ba71-486d-9672-44146f09acb1") : secret "infra-operator-webhook-server-cert" not found
Dec 11 10:52:42 crc kubenswrapper[5016]: I1211 10:52:42.508436 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fbtnxw\" (UID: \"dfea8003-afd2-45aa-bd7b-dcc5460e8a80\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw"
Dec 11 10:52:42 crc kubenswrapper[5016]: E1211 10:52:42.508676 5016 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Dec 11 10:52:42 crc kubenswrapper[5016]: E1211 10:52:42.508746 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert podName:dfea8003-afd2-45aa-bd7b-dcc5460e8a80 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:50.508727683 +0000 UTC m=+1087.327287262 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fbtnxw" (UID: "dfea8003-afd2-45aa-bd7b-dcc5460e8a80") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Dec 11 10:52:42 crc kubenswrapper[5016]: I1211 10:52:42.917870 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"
Dec 11 10:52:42 crc kubenswrapper[5016]: I1211 10:52:42.917986 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"
Dec 11 10:52:42 crc kubenswrapper[5016]: E1211 10:52:42.918067 5016 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Dec 11 10:52:42 crc kubenswrapper[5016]: E1211 10:52:42.918137 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs podName:5adcacd2-730a-4cb7-9944-239289405003 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:50.918120654 +0000 UTC m=+1087.736680233 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs") pod "openstack-operator-controller-manager-595db99498-vmll2" (UID: "5adcacd2-730a-4cb7-9944-239289405003") : secret "webhook-server-cert" not found
Dec 11 10:52:42 crc kubenswrapper[5016]: E1211 10:52:42.918160 5016 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Dec 11 10:52:42 crc kubenswrapper[5016]: E1211 10:52:42.918316 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs podName:5adcacd2-730a-4cb7-9944-239289405003 nodeName:}" failed. No retries permitted until 2025-12-11 10:52:50.918257678 +0000 UTC m=+1087.736817257 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs") pod "openstack-operator-controller-manager-595db99498-vmll2" (UID: "5adcacd2-730a-4cb7-9944-239289405003") : secret "metrics-server-cert" not found
Dec 11 10:52:42 crc kubenswrapper[5016]: I1211 10:52:42.933191 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:52:42 crc kubenswrapper[5016]: I1211 10:52:42.933247 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:52:42 crc kubenswrapper[5016]: I1211 10:52:42.933304 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7"
Dec 11 10:52:42 crc kubenswrapper[5016]: I1211 10:52:42.933991 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"512f5c783f58cb8b023d09c68e6c5e485f14c303c2f06e1b8d93d73bedfab5d9"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 11 10:52:42 crc kubenswrapper[5016]: I1211 10:52:42.934063 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://512f5c783f58cb8b023d09c68e6c5e485f14c303c2f06e1b8d93d73bedfab5d9" gracePeriod=600
Dec 11 10:52:43 crc kubenswrapper[5016]: I1211 10:52:43.732917 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="512f5c783f58cb8b023d09c68e6c5e485f14c303c2f06e1b8d93d73bedfab5d9" exitCode=0
Dec 11 10:52:43 crc kubenswrapper[5016]: I1211 10:52:43.733168 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"512f5c783f58cb8b023d09c68e6c5e485f14c303c2f06e1b8d93d73bedfab5d9"}
Dec 11 10:52:43 crc kubenswrapper[5016]: I1211 10:52:43.733290 5016 scope.go:117] "RemoveContainer" containerID="5b248f167297cc4b041da14181a2d07d9de5add0be6fdd5562f37c434da09668"
Dec 11 10:52:50 crc kubenswrapper[5016]: I1211 10:52:50.031251 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert\") pod \"infra-operator-controller-manager-78d48bff9d-9s5rq\" (UID: \"b44a8ea9-ba71-486d-9672-44146f09acb1\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"
Dec 11 10:52:50 crc kubenswrapper[5016]: I1211 10:52:50.038360 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b44a8ea9-ba71-486d-9672-44146f09acb1-cert\") pod \"infra-operator-controller-manager-78d48bff9d-9s5rq\" (UID: \"b44a8ea9-ba71-486d-9672-44146f09acb1\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"
Dec 11 10:52:50 crc kubenswrapper[5016]: I1211 10:52:50.055684 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-bg9m4"
Dec 11 10:52:50 crc kubenswrapper[5016]: I1211 10:52:50.425968 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"
Dec 11 10:52:50 crc kubenswrapper[5016]: I1211 10:52:50.541257 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fbtnxw\" (UID: \"dfea8003-afd2-45aa-bd7b-dcc5460e8a80\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw"
Dec 11 10:52:50 crc kubenswrapper[5016]: I1211 10:52:50.549639 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dfea8003-afd2-45aa-bd7b-dcc5460e8a80-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fbtnxw\" (UID: \"dfea8003-afd2-45aa-bd7b-dcc5460e8a80\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw"
Dec 11 10:52:50 crc kubenswrapper[5016]: I1211 10:52:50.588050 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-6hcks"
Dec 11 10:52:50 crc kubenswrapper[5016]: I1211 10:52:50.598670 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw"
Dec 11 10:52:50 crc kubenswrapper[5016]: I1211 10:52:50.948041 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"
Dec 11 10:52:50 crc kubenswrapper[5016]: I1211 10:52:50.948141 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"
Dec 11 10:52:50 crc kubenswrapper[5016]: E1211 10:52:50.948267 5016 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Dec 11 10:52:50 crc kubenswrapper[5016]: E1211 10:52:50.948323 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs podName:5adcacd2-730a-4cb7-9944-239289405003 nodeName:}" failed. No retries permitted until 2025-12-11 10:53:06.948308196 +0000 UTC m=+1103.766867765 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs") pod "openstack-operator-controller-manager-595db99498-vmll2" (UID: "5adcacd2-730a-4cb7-9944-239289405003") : secret "webhook-server-cert" not found
Dec 11 10:52:50 crc kubenswrapper[5016]: I1211 10:52:50.954683 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-metrics-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"
Dec 11 10:52:51 crc kubenswrapper[5016]: E1211 10:52:51.945269 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168"
Dec 11 10:52:51 crc kubenswrapper[5016]: E1211 10:52:51.945748 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kczql,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-vcscv_openstack-operators(4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 10:52:54 crc kubenswrapper[5016]: E1211 10:52:54.320266 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670"
Dec 11 10:52:54 crc kubenswrapper[5016]: E1211 10:52:54.321163 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9fh22,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-vbw9f_openstack-operators(e4f0f2a5-a15b-45b8-96ea-91e37ea98237): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 10:52:55 crc kubenswrapper[5016]: E1211 10:52:55.418761 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7"
Dec 11 10:52:55 crc kubenswrapper[5016]: E1211 10:52:55.418961 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-f6758,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7765d96ddf-lx6dp_openstack-operators(b2cd783c-ef38-4478-9f86-60374f554bb2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 11 10:53:01 crc kubenswrapper[5016]: I1211 10:53:01.502696 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw"]
Dec 11 10:53:01 crc kubenswrapper[5016]: I1211 10:53:01.589315 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq"]
Dec 11 10:53:01 crc kubenswrapper[5016]: I1211 10:53:01.898969 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz" event={"ID":"daa29314-dcea-4026-9a51-7f9ceaed9052","Type":"ContainerStarted","Data":"dcfd7b2d60eadb5e4752f697eb811b87de1c3b6f951e77ec3ba6bf4ec8d84a6f"}
Dec 11 10:53:01 crc kubenswrapper[5016]: I1211 10:53:01.905485 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc" event={"ID":"2f35a405-1590-4bd7-9f64-f897bac8e8e7","Type":"ContainerStarted","Data":"bb03e1a6cfdf624de1f009c0aca0c3e0f9fa2fec7a1e09f2f4122c5d8f280be0"}
Dec 11 10:53:01 crc kubenswrapper[5016]: I1211 10:53:01.911934 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"49f5883716361ecf20e37d0a33857b58813542483a33785fbd7c2c019dd8b594"}
Dec 11 10:53:01 crc kubenswrapper[5016]: I1211 10:53:01.916753 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf" event={"ID":"251e6e53-bbba-4d67-a361-44c471db70ff","Type":"ContainerStarted","Data":"09d12322a603c167adab7949d61e6022e68171aa40788171807da5e563a5b767"}
Dec 11 10:53:01 crc kubenswrapper[5016]: I1211 10:53:01.921898 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv" event={"ID":"cc7c5322-f255-4c02-b684-d1bccf74eb1a","Type":"ContainerStarted","Data":"ba744ec2547131b61e4b1f875a04efb2896d0475534def293022172e9192c8e0"}
Dec 11 10:53:01 crc kubenswrapper[5016]: I1211 10:53:01.924774 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg" event={"ID":"cdf76c07-0127-402e-90d7-9c868594b4d7","Type":"ContainerStarted","Data":"8a1e9b15a80d667f44fa2264584f727d3576c3253fc5461acf705569d0f3de16"}
Dec 11 10:53:01 crc kubenswrapper[5016]: I1211 10:53:01.926896 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-48q65" event={"ID":"ac95cdf1-ed70-4d47-8b28-3f7f5e68804b","Type":"ContainerStarted","Data":"a8544cab2f3766c7cef6e22b9f446af91e20efc3c4071ce88e9c78e27250952d"}
Dec 11 10:53:01 crc kubenswrapper[5016]: I1211 10:53:01.948123 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8" event={"ID":"de234c3f-f96c-444d-a7f5-a453df14d2e4","Type":"ContainerStarted","Data":"0b39e456f6baa872b21ff7f30c4648e1a9a0626dd66466c65b3c71a5d376fc11"}
Dec 11 10:53:03 crc kubenswrapper[5016]: I1211 10:53:02.999296 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98" event={"ID":"26861a3b-3eb1-4c65-8c69-2d43a2aab77c","Type":"ContainerStarted","Data":"72cfdf6274c948d4163e3611e11847b09a2216f94a894bcf9b6d29c46692ac9c"}
Dec 11 10:53:03 crc kubenswrapper[5016]: I1211 10:53:03.005591 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" event={"ID":"dfea8003-afd2-45aa-bd7b-dcc5460e8a80","Type":"ContainerStarted","Data":"10bb3e0f3f4bddcb68cc86f0270e17a86f19902e1fa65896863a1465cb193c8b"}
Dec 11 10:53:03 crc kubenswrapper[5016]: I1211 10:53:03.008376 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" event={"ID":"1d57e8d7-5c81-4a4d-97a9-af4795392e5a","Type":"ContainerStarted","Data":"4408d71ded57f801fb5ece3d3ddcb3c259033f598eb9a9a24647f8e67226c115"}
Dec 11 10:53:03 crc kubenswrapper[5016]: I1211 10:53:03.013492 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-kwflq" event={"ID":"1099bcae-fea4-4864-8434-98ed888307e5","Type":"ContainerStarted","Data":"76d4077979b5c6719520d06d46a31e72553191055e1ffc85b567e4943e50d54f"}
Dec 11 10:53:03 crc kubenswrapper[5016]: I1211 10:53:03.015048 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" event={"ID":"5e9876fa-8ec4-432b-b582-6ee210b828b5","Type":"ContainerStarted","Data":"5b6f37318de775d93033287bc7ae3bf8d22df613ec69eac42a77b7c67d25d521"}
Dec 11 10:53:03 crc kubenswrapper[5016]: I1211 10:53:03.016216 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp" event={"ID":"b1812840-a032-4c7a-a851-505f89b19063","Type":"ContainerStarted","Data":"e4a606c73d11a7c9c50511bce56f6390614f4fa44add429d8b5143d8b3a8d41f"}
Dec 11 10:53:03 crc kubenswrapper[5016]: I1211 10:53:03.017267 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5" event={"ID":"f07a07a1-b235-4d36-a666-2b1be3363f34","Type":"ContainerStarted","Data":"da62976187fedb626119a9da4856d83312272584304815d60bd8f788fb2e5b36"}
Dec 11 10:53:03 crc kubenswrapper[5016]: I1211 10:53:03.018092 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq" event={"ID":"b44a8ea9-ba71-486d-9672-44146f09acb1","Type":"ContainerStarted","Data":"7de1307f30a5afd64835749b72c510eec5bac559c45a719fe85d088e26fd0de1"}
Dec 11 10:53:03 crc kubenswrapper[5016]: I1211 10:53:03.020585 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n" event={"ID":"95b9a24e-2b04-4161-aee4-2b7a73330a4e","Type":"ContainerStarted","Data":"e7e284a8e7387af7f93cdfcd247d90447da9a2ce19abf0116d36ae386ea42929"}
Dec 11 10:53:05 crc kubenswrapper[5016]: I1211 10:53:05.038271 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" event={"ID":"82d121a5-e733-445f-be6e-bc96e3c162e2","Type":"ContainerStarted","Data":"1405d6060d4a9a8f32ad09c1defcc0e59c139745ad38442bccf27a56e2888135"}
Dec 11 10:53:05 crc kubenswrapper[5016]: I1211 10:53:05.041678 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6" event={"ID":"e5d7cce6-369e-4837-8e40-385de0d684f7","Type":"ContainerStarted","Data":"c074c2627ed90894f226e5f1c2f8ca7d87752e9abeb9301df5c3cf9e158b2f5b"}
Dec 11 10:53:05 crc kubenswrapper[5016]: I1211 10:53:05.059318 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rhkn6" podStartSLOduration=5.759538319 podStartE2EDuration="31.059297483s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:36.289330038 +0000 UTC m=+1073.107889617" lastFinishedPulling="2025-12-11 10:53:01.589089202 +0000 UTC m=+1098.407648781" observedRunningTime="2025-12-11 10:53:05.059081338 +0000 UTC m=+1101.877640927" watchObservedRunningTime="2025-12-11 10:53:05.059297483 +0000 UTC m=+1101.877857072"
Dec 11 10:53:06 crc kubenswrapper[5016]: I1211 10:53:06.950057 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"
Dec 11 10:53:06 crc kubenswrapper[5016]: I1211 10:53:06.965288 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5adcacd2-730a-4cb7-9944-239289405003-webhook-certs\") pod \"openstack-operator-controller-manager-595db99498-vmll2\" (UID: \"5adcacd2-730a-4cb7-9944-239289405003\") " pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"
Dec 11 10:53:07 crc kubenswrapper[5016]: I1211 10:53:07.097094 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-fg9st"
Dec 11 10:53:07 crc kubenswrapper[5016]: I1211 10:53:07.105156 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"
Dec 11 10:53:22 crc kubenswrapper[5016]: I1211 10:53:22.165174 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" event={"ID":"542f9a19-fab3-426b-bb8a-e12a45e4e422","Type":"ContainerStarted","Data":"734b3230858159ffb7cba12c21eac07727162cb589386f3885b7d6e384653159"}
Dec 11 10:53:22 crc kubenswrapper[5016]: I1211 10:53:22.814319 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 11 10:53:23 crc kubenswrapper[5016]: E1211 10:53:23.695181 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:9d539fb6b72f91cfc6200bb91b7c6dbaeab17c7711342dd3a9549c66762a2d48"
Dec 11 10:53:23 crc kubenswrapper[5016]: E1211 10:53:23.695708 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:9d539fb6b72f91cfc6200bb91b7c6dbaeab17c7711342dd3a9549c66762a2d48,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_I
MAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELAT
ED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-an
telope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMA
GE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g9xbj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-84b575879fbtnxw_openstack-operators(dfea8003-afd2-45aa-bd7b-dcc5460e8a80): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:53:24 crc kubenswrapper[5016]: E1211 10:53:24.571538 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:24 crc kubenswrapper[5016]: E1211 10:53:24.571723 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-f6758,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7765d96ddf-lx6dp_openstack-operators(b2cd783c-ef38-4478-9f86-60374f554bb2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:53:24 crc kubenswrapper[5016]: E1211 10:53:24.572926 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp" podUID="b2cd783c-ef38-4478-9f86-60374f554bb2" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.244553 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.244844 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fs4d5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7d9dfd778-qtvrv_openstack-operators(cc7c5322-f255-4c02-b684-d1bccf74eb1a): 
ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.246361 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv" podUID="cc7c5322-f255-4c02-b684-d1bccf74eb1a" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.254537 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.255256 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-82tfg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-967d97867-48q65_openstack-operators(ac95cdf1-ed70-4d47-8b28-3f7f5e68804b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.255474 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.255613 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l7bhq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-5697bb5779-v8hsp_openstack-operators(b1812840-a032-4c7a-a851-505f89b19063): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.255780 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.255966 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pnvgr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-kwflq_openstack-operators(1099bcae-fea4-4864-8434-98ed888307e5): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.256131 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.256282 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5d8nd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-68c6d99b8f-tcntz_openstack-operators(daa29314-dcea-4026-9a51-7f9ceaed9052): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.256490 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-967d97867-48q65" podUID="ac95cdf1-ed70-4d47-8b28-3f7f5e68804b" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.256609 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.256764 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q24xx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-9d58d64bc-5z7c4_openstack-operators(1d57e8d7-5c81-4a4d-97a9-af4795392e5a): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.256952 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp" podUID="b1812840-a032-4c7a-a851-505f89b19063" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.257075 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.257179 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-78f8948974-kwflq" podUID="1099bcae-fea4-4864-8434-98ed888307e5" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.257367 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n7kdl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-697fb699cf-4hwxf_openstack-operators(251e6e53-bbba-4d67-a361-44c471db70ff): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.257503 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz" podUID="daa29314-dcea-4026-9a51-7f9ceaed9052" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.258185 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" podUID="1d57e8d7-5c81-4a4d-97a9-af4795392e5a" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.258755 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf" podUID="251e6e53-bbba-4d67-a361-44c471db70ff" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.265821 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/infra-operator@sha256:ccc60d56d8efc2e91a7d8a7131eb7e06c189c32247f2a819818c084ba2e2f2ab" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.266095 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:ccc60d56d8efc2e91a7d8a7131eb7e06c189c32247f2a819818c084ba2e2f2ab,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t85z9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-78d48bff9d-9s5rq_openstack-operators(b44a8ea9-ba71-486d-9672-44146f09acb1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.281909 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.282476 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lrbwp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-6c677c69b-rzjg8_openstack-operators(de234c3f-f96c-444d-a7f5-a453df14d2e4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.282916 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = reading blob sha256:4fa131a1b726b2d6468d461e7d8867a2157d5671f712461d8abd126155fdf9ce: Get \"https://quay.io/v2/openstack-k8s-operators/kube-rbac-proxy/blobs/sha256:4fa131a1b726b2d6468d461e7d8867a2157d5671f712461d8abd126155fdf9ce\": context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.283296 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qptw9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5b5fd79c9c-46n5n_openstack-operators(95b9a24e-2b04-4161-aee4-2b7a73330a4e): ErrImagePull: rpc error: code = Canceled desc = reading blob sha256:4fa131a1b726b2d6468d461e7d8867a2157d5671f712461d8abd126155fdf9ce: Get \"https://quay.io/v2/openstack-k8s-operators/kube-rbac-proxy/blobs/sha256:4fa131a1b726b2d6468d461e7d8867a2157d5671f712461d8abd126155fdf9ce\": context canceled" logger="UnhandledError" 
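The burst of errors between 10:53:23 and 10:53:25 above follows a single pattern: the CRI pull RPC returns `rpc error: code = Canceled desc = copying config|layer: context canceled`, which typically indicates the pull context was canceled on the kubelet/CRI-O side rather than a rejection by quay.io; kubelet then records ErrImagePull for each affected container, skips the pod sync ("Error syncing pod, skipping"), and the pods surface ImagePullBackOff shortly after (visible at 10:53:26 below). A minimal sketch for tallying these failures per image, assuming only the klog fields visible above (`"PullImage from image service failed"` and `image="..."`); the program name, regexp, and buffer sizes are illustrative, not anything kubelet provides:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strings"
)

// Tally CRI image-pull failures in a kubelet log, keyed by image reference.
// Assumes the klog key=value shape seen above, e.g.:
//   E1211 10:53:25.281909 5016 log.go:32] "PullImage from image service failed" err="..." image="quay.io/..."
var imageRe = regexp.MustCompile(`image="([^"]+)"`)

func main() {
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	// Entries like the Container{...} dumps above run to several KB per line,
	// so give the scanner more room than its 64KB default.
	sc.Buffer(make([]byte, 0, 1024*1024), 4*1024*1024)
	for sc.Scan() {
		line := sc.Text()
		if !strings.Contains(line, `"PullImage from image service failed"`) {
			continue
		}
		if m := imageRe.FindStringSubmatch(line); m != nil {
			counts[m[1]]++
		}
	}
	for image, n := range counts {
		fmt.Printf("%4d  %s\n", n, image)
	}
}
```

Run as `go run pulltally.go < kubelet.log` to see which images were mid-pull when the context was canceled; in this section kube-rbac-proxy:v0.16.0 dominates, since every operator pod carries it as a sidecar.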
Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.283811 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8" podUID="de234c3f-f96c-444d-a7f5-a453df14d2e4" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.284388 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = reading blob sha256:4fa131a1b726b2d6468d461e7d8867a2157d5671f712461d8abd126155fdf9ce: Get \\\"https://quay.io/v2/openstack-k8s-operators/kube-rbac-proxy/blobs/sha256:4fa131a1b726b2d6468d461e7d8867a2157d5671f712461d8abd126155fdf9ce\\\": context canceled\"" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n" podUID="95b9a24e-2b04-4161-aee4-2b7a73330a4e" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.284617 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.284732 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7fq6k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5fdfd5b6b5-5qrxg_openstack-operators(cdf76c07-0127-402e-90d7-9c868594b4d7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.285920 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.286246 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9fh22,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-vbw9f_openstack-operators(e4f0f2a5-a15b-45b8-96ea-91e37ea98237): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.286407 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.286848 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-msbfq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-79c8c4686c-dfr98_openstack-operators(26861a3b-3eb1-4c65-8c69-2d43a2aab77c): 
ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.287071 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg" podUID="cdf76c07-0127-402e-90d7-9c868594b4d7" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.288607 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f" podUID="e4f0f2a5-a15b-45b8-96ea-91e37ea98237" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.288863 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98" podUID="26861a3b-3eb1-4c65-8c69-2d43a2aab77c" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.290395 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.290814 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nqsmr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-58d5ff84df-6jmdx_openstack-operators(5e9876fa-8ec4-432b-b582-6ee210b828b5): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.291985 5016 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" podUID="5e9876fa-8ec4-432b-b582-6ee210b828b5" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.305755 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.306021 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kczql,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-vcscv_openstack-operators(4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.307432 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vcscv" podUID="4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.357636 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.357919 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true 
--v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jdzvc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-wf9q5_openstack-operators(f07a07a1-b235-4d36-a666-2b1be3363f34): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.359216 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5" podUID="f07a07a1-b235-4d36-a666-2b1be3363f34" Dec 11 10:53:25 crc kubenswrapper[5016]: E1211 10:53:25.658871 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" podUID="dfea8003-afd2-45aa-bd7b-dcc5460e8a80" Dec 11 10:53:25 crc kubenswrapper[5016]: I1211 10:53:25.769579 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-595db99498-vmll2"] Dec 11 10:53:25 crc kubenswrapper[5016]: W1211 10:53:25.770491 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5adcacd2_730a_4cb7_9944_239289405003.slice/crio-9ea87b134bf8e1e874c24970099c9337e2d837fa5e3b5e1038ad116262255efd WatchSource:0}: Error finding container 9ea87b134bf8e1e874c24970099c9337e2d837fa5e3b5e1038ad116262255efd: Status 404 returned error can't find the container with id 9ea87b134bf8e1e874c24970099c9337e2d837fa5e3b5e1038ad116262255efd Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.210595 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" event={"ID":"dfea8003-afd2-45aa-bd7b-dcc5460e8a80","Type":"ContainerStarted","Data":"5dfabd0fd43ed9b9b88c135d4e140170cdbc55e7b826f2fb120118fa450be34e"} Dec 11 10:53:26 crc kubenswrapper[5016]: E1211 10:53:26.212511 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:9d539fb6b72f91cfc6200bb91b7c6dbaeab17c7711342dd3a9549c66762a2d48\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" podUID="dfea8003-afd2-45aa-bd7b-dcc5460e8a80" Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.215692 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" event={"ID":"82d121a5-e733-445f-be6e-bc96e3c162e2","Type":"ContainerStarted","Data":"b1b22884df702591943a30a2393b964f3711cd98e45ec5d6669b6d2e81b1f746"} Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.216341 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.218831 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.219159 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" event={"ID":"542f9a19-fab3-426b-bb8a-e12a45e4e422","Type":"ContainerStarted","Data":"7c428461cdb516e9465e1549aa7f37e7566bc4925a7124ae6cbc3def0321c7e2"} Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.220321 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.221898 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc" event={"ID":"2f35a405-1590-4bd7-9f64-f897bac8e8e7","Type":"ContainerStarted","Data":"875d15b5cea4086591fa62d00dc068d34b1a759aef620c6aa2d5150706122741"} Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.222723 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc" Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.222860 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.224686 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc" Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.225304 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" event={"ID":"5adcacd2-730a-4cb7-9944-239289405003","Type":"ContainerStarted","Data":"cfe747dbb956ffc410324398fbf06bfad13f0a138c03c78739942f5608066866"} Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.225431 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" event={"ID":"5adcacd2-730a-4cb7-9944-239289405003","Type":"ContainerStarted","Data":"9ea87b134bf8e1e874c24970099c9337e2d837fa5e3b5e1038ad116262255efd"} Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.227558 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 
10:53:26.227628 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.446659 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-jpkdc" podStartSLOduration=3.14168508 podStartE2EDuration="53.44663999s" podCreationTimestamp="2025-12-11 10:52:33 +0000 UTC" firstStartedPulling="2025-12-11 10:52:35.489428049 +0000 UTC m=+1072.307987628" lastFinishedPulling="2025-12-11 10:53:25.794382959 +0000 UTC m=+1122.612942538" observedRunningTime="2025-12-11 10:53:26.446388144 +0000 UTC m=+1123.264947733" watchObservedRunningTime="2025-12-11 10:53:26.44663999 +0000 UTC m=+1123.265199569" Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.486484 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-75944c9b7-92pjv" podStartSLOduration=3.499111541 podStartE2EDuration="52.486453438s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:36.382493951 +0000 UTC m=+1073.201053530" lastFinishedPulling="2025-12-11 10:53:25.369835858 +0000 UTC m=+1122.188395427" observedRunningTime="2025-12-11 10:53:26.477458717 +0000 UTC m=+1123.296018316" watchObservedRunningTime="2025-12-11 10:53:26.486453438 +0000 UTC m=+1123.305013017" Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.635403 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5854674fcc-wphqn" podStartSLOduration=3.483745956 podStartE2EDuration="52.635380204s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:36.221374507 +0000 UTC m=+1073.039934096" lastFinishedPulling="2025-12-11 10:53:25.373008765 +0000 UTC m=+1122.191568344" observedRunningTime="2025-12-11 10:53:26.635338643 +0000 UTC m=+1123.453898222" watchObservedRunningTime="2025-12-11 10:53:26.635380204 +0000 UTC m=+1123.453939803" Dec 11 10:53:26 crc kubenswrapper[5016]: I1211 10:53:26.753735 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" podStartSLOduration=52.753699508 podStartE2EDuration="52.753699508s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:53:26.736804003 +0000 UTC m=+1123.555363602" watchObservedRunningTime="2025-12-11 10:53:26.753699508 +0000 UTC m=+1123.572259087" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.106077 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.238742 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-kwflq" event={"ID":"1099bcae-fea4-4864-8434-98ed888307e5","Type":"ContainerStarted","Data":"5517311bd4185938cb1390095e8ec5c6b3767495cbed7fcca854f472cae810f8"} Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.240390 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-78f8948974-kwflq" Dec 11 10:53:27 crc 
kubenswrapper[5016]: I1211 10:53:27.244617 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" event={"ID":"5e9876fa-8ec4-432b-b582-6ee210b828b5","Type":"ContainerStarted","Data":"38366475f58dbf41b4754642777cd541a40b82631c7213a1f0a2009b7c027262"} Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.248691 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp" event={"ID":"b1812840-a032-4c7a-a851-505f89b19063","Type":"ContainerStarted","Data":"afcc9966f823b62558fbb055a5a682218492934c8570976d3c663ea41d096a72"} Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.250028 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.250428 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-78f8948974-kwflq" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.259579 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz" event={"ID":"daa29314-dcea-4026-9a51-7f9ceaed9052","Type":"ContainerStarted","Data":"f2d4811f405451c95d633b537c5f9d45becb15c3f57d1401ed5ef5b4633b4af7"} Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.260130 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.261575 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.270967 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.277298 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-78f8948974-kwflq" podStartSLOduration=34.489320753 podStartE2EDuration="53.27728152s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:36.145581151 +0000 UTC m=+1072.964140730" lastFinishedPulling="2025-12-11 10:52:54.933541918 +0000 UTC m=+1091.752101497" observedRunningTime="2025-12-11 10:53:27.277167218 +0000 UTC m=+1124.095726827" watchObservedRunningTime="2025-12-11 10:53:27.27728152 +0000 UTC m=+1124.095841099" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.279772 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf" event={"ID":"251e6e53-bbba-4d67-a361-44c471db70ff","Type":"ContainerStarted","Data":"d8f770c93d75ca288f656614f1346d5b7c9c87b36ca418b50175591966bbf10a"} Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.282198 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.291236 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf" Dec 11 
10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.296583 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n" event={"ID":"95b9a24e-2b04-4161-aee4-2b7a73330a4e","Type":"ContainerStarted","Data":"5f31361b6aea898c08a629d426aaeb7c2964d132547bc9a39f33a9e85650f3b2"} Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.298788 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.304543 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n" Dec 11 10:53:27 crc kubenswrapper[5016]: E1211 10:53:27.337155 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:9d539fb6b72f91cfc6200bb91b7c6dbaeab17c7711342dd3a9549c66762a2d48\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" podUID="dfea8003-afd2-45aa-bd7b-dcc5460e8a80" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.454788 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-tcntz" podStartSLOduration=33.625287577 podStartE2EDuration="53.454754087s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:35.563982093 +0000 UTC m=+1072.382541672" lastFinishedPulling="2025-12-11 10:52:55.393448603 +0000 UTC m=+1092.212008182" observedRunningTime="2025-12-11 10:53:27.427639332 +0000 UTC m=+1124.246198921" watchObservedRunningTime="2025-12-11 10:53:27.454754087 +0000 UTC m=+1124.273313666" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.500656 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-58d5ff84df-6jmdx" podStartSLOduration=28.75251118 podStartE2EDuration="53.500625793s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:36.271065679 +0000 UTC m=+1073.089625258" lastFinishedPulling="2025-12-11 10:53:01.019180292 +0000 UTC m=+1097.837739871" observedRunningTime="2025-12-11 10:53:27.493431207 +0000 UTC m=+1124.311990796" watchObservedRunningTime="2025-12-11 10:53:27.500625793 +0000 UTC m=+1124.319185372" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.583896 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-v8hsp" podStartSLOduration=31.609283994 podStartE2EDuration="54.583872666s" podCreationTimestamp="2025-12-11 10:52:33 +0000 UTC" firstStartedPulling="2025-12-11 10:52:35.438857235 +0000 UTC m=+1072.257416814" lastFinishedPulling="2025-12-11 10:52:58.413445907 +0000 UTC m=+1095.232005486" observedRunningTime="2025-12-11 10:53:27.569988555 +0000 UTC m=+1124.388548144" watchObservedRunningTime="2025-12-11 10:53:27.583872666 +0000 UTC m=+1124.402432245" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.701325 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-46n5n" podStartSLOduration=34.427685524 podStartE2EDuration="53.701304549s" 
podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:36.119819848 +0000 UTC m=+1072.938379427" lastFinishedPulling="2025-12-11 10:52:55.393438873 +0000 UTC m=+1092.211998452" observedRunningTime="2025-12-11 10:53:27.699475385 +0000 UTC m=+1124.518034974" watchObservedRunningTime="2025-12-11 10:53:27.701304549 +0000 UTC m=+1124.519864138" Dec 11 10:53:27 crc kubenswrapper[5016]: I1211 10:53:27.799034 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-4hwxf" podStartSLOduration=35.306930447 podStartE2EDuration="54.798999847s" podCreationTimestamp="2025-12-11 10:52:33 +0000 UTC" firstStartedPulling="2025-12-11 10:52:35.418689708 +0000 UTC m=+1072.237249287" lastFinishedPulling="2025-12-11 10:52:54.910759108 +0000 UTC m=+1091.729318687" observedRunningTime="2025-12-11 10:53:27.755673914 +0000 UTC m=+1124.574233513" watchObservedRunningTime="2025-12-11 10:53:27.798999847 +0000 UTC m=+1124.617559426" Dec 11 10:53:28 crc kubenswrapper[5016]: E1211 10:53:28.122338 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq" podUID="b44a8ea9-ba71-486d-9672-44146f09acb1" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.336299 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv" event={"ID":"cc7c5322-f255-4c02-b684-d1bccf74eb1a","Type":"ContainerStarted","Data":"a477fa4483e6306acc716ca639ffb7ceb6771510e6951f5164f320ce8758e318"} Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.337755 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.343696 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.350253 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg" event={"ID":"cdf76c07-0127-402e-90d7-9c868594b4d7","Type":"ContainerStarted","Data":"3568cc26f2bd0c06ebbc17a7884cfc9fdcc33a29d6a2b43863157a5d3803cc61"} Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.351299 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.357011 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.357568 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq" event={"ID":"b44a8ea9-ba71-486d-9672-44146f09acb1","Type":"ContainerStarted","Data":"b0b3b272f35b6db9cd8017fe4fdcc3cacddf1ee9e6f9d7d9499c0c0a1a580c4c"} Dec 11 10:53:28 crc kubenswrapper[5016]: E1211 10:53:28.362540 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/infra-operator@sha256:ccc60d56d8efc2e91a7d8a7131eb7e06c189c32247f2a819818c084ba2e2f2ab\\\"\"" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq" podUID="b44a8ea9-ba71-486d-9672-44146f09acb1" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.363341 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-qtvrv" podStartSLOduration=42.013277212 podStartE2EDuration="55.36331993s" podCreationTimestamp="2025-12-11 10:52:33 +0000 UTC" firstStartedPulling="2025-12-11 10:52:35.3045236 +0000 UTC m=+1072.123083179" lastFinishedPulling="2025-12-11 10:52:48.654566318 +0000 UTC m=+1085.473125897" observedRunningTime="2025-12-11 10:53:28.358711897 +0000 UTC m=+1125.177271476" watchObservedRunningTime="2025-12-11 10:53:28.36331993 +0000 UTC m=+1125.181879509" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.364961 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5" event={"ID":"f07a07a1-b235-4d36-a666-2b1be3363f34","Type":"ContainerStarted","Data":"5d3a4b2a029750ea92bec4f2b485ff5ee336da4cbb13ae5a0302be7c2d767c86"} Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.366394 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.375150 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-48q65" event={"ID":"ac95cdf1-ed70-4d47-8b28-3f7f5e68804b","Type":"ContainerStarted","Data":"dde7b04467f772b20f13551b7a5035ece385ad75a9d2d737917e61c30df37bde"} Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.375497 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.376036 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-967d97867-48q65" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.378040 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-967d97867-48q65" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.382317 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" event={"ID":"1d57e8d7-5c81-4a4d-97a9-af4795392e5a","Type":"ContainerStarted","Data":"423521a7649affba7885aa700834e9bd639e176940c1eb18c4186637f1de36b6"} Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.385057 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.389469 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f" event={"ID":"e4f0f2a5-a15b-45b8-96ea-91e37ea98237","Type":"ContainerStarted","Data":"eecfe438df5a1d73d6496f58459a37f74cd75a0b98cd9a91dd0f8668ee451432"} Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.389843 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" 
Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.392551 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp" event={"ID":"b2cd783c-ef38-4478-9f86-60374f554bb2","Type":"ContainerStarted","Data":"14807a4dc4459741e7ce4a066635c6832259c920569870e2f8241f096c084a37"} Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.394130 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vcscv" event={"ID":"4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19","Type":"ContainerStarted","Data":"08c91fe1391ca56a1c3a3e5841057a5635262f130d32076b1a6cadfe40906cdb"} Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.400649 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98" event={"ID":"26861a3b-3eb1-4c65-8c69-2d43a2aab77c","Type":"ContainerStarted","Data":"d28726b99eabb4cc2b5a6aa8dbea650aeba95689fdaa551a97e352ee8fbba092"} Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.410994 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.424061 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.434101 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8" event={"ID":"de234c3f-f96c-444d-a7f5-a453df14d2e4","Type":"ContainerStarted","Data":"92916755810ec2bb5bf58f58f5f13074e7866df7e6dfda9b5780b602bcce2a8f"} Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.435620 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.452176 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.477851 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-5qrxg" podStartSLOduration=36.133925667 podStartE2EDuration="54.47780592s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:35.840161357 +0000 UTC m=+1072.658720936" lastFinishedPulling="2025-12-11 10:52:54.18404161 +0000 UTC m=+1091.002601189" observedRunningTime="2025-12-11 10:53:28.476736384 +0000 UTC m=+1125.295295963" watchObservedRunningTime="2025-12-11 10:53:28.47780592 +0000 UTC m=+1125.296365509" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.530660 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-967d97867-48q65" podStartSLOduration=35.159409951 podStartE2EDuration="54.530632487s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:35.564831154 +0000 UTC m=+1072.383390733" lastFinishedPulling="2025-12-11 10:52:54.93605369 +0000 UTC m=+1091.754613269" observedRunningTime="2025-12-11 10:53:28.516817938 +0000 UTC m=+1125.335377517" watchObservedRunningTime="2025-12-11 10:53:28.530632487 +0000 UTC 
m=+1125.349192066" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.681144 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-dfr98" podStartSLOduration=35.738443519 podStartE2EDuration="54.681109291s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:35.988581419 +0000 UTC m=+1072.807140998" lastFinishedPulling="2025-12-11 10:52:54.931247191 +0000 UTC m=+1091.749806770" observedRunningTime="2025-12-11 10:53:28.634163719 +0000 UTC m=+1125.452723298" watchObservedRunningTime="2025-12-11 10:53:28.681109291 +0000 UTC m=+1125.499668880" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.703263 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-wf9q5" podStartSLOduration=36.05357893 podStartE2EDuration="54.703244134s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:36.261375761 +0000 UTC m=+1073.079935340" lastFinishedPulling="2025-12-11 10:52:54.911040965 +0000 UTC m=+1091.729600544" observedRunningTime="2025-12-11 10:53:28.674357485 +0000 UTC m=+1125.492917074" watchObservedRunningTime="2025-12-11 10:53:28.703244134 +0000 UTC m=+1125.521803713" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.737665 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-5z7c4" podStartSLOduration=29.992868139 podStartE2EDuration="54.737632638s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:36.275195831 +0000 UTC m=+1073.093755410" lastFinishedPulling="2025-12-11 10:53:01.01996033 +0000 UTC m=+1097.838519909" observedRunningTime="2025-12-11 10:53:28.701673466 +0000 UTC m=+1125.520233045" watchObservedRunningTime="2025-12-11 10:53:28.737632638 +0000 UTC m=+1125.556192207" Dec 11 10:53:28 crc kubenswrapper[5016]: I1211 10:53:28.742920 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-rzjg8" podStartSLOduration=36.08632441 podStartE2EDuration="55.742895637s" podCreationTimestamp="2025-12-11 10:52:33 +0000 UTC" firstStartedPulling="2025-12-11 10:52:35.255015861 +0000 UTC m=+1072.073575430" lastFinishedPulling="2025-12-11 10:52:54.911587078 +0000 UTC m=+1091.730146657" observedRunningTime="2025-12-11 10:53:28.731152889 +0000 UTC m=+1125.549712468" watchObservedRunningTime="2025-12-11 10:53:28.742895637 +0000 UTC m=+1125.561455216" Dec 11 10:53:29 crc kubenswrapper[5016]: I1211 10:53:29.443990 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f" event={"ID":"e4f0f2a5-a15b-45b8-96ea-91e37ea98237","Type":"ContainerStarted","Data":"d8aa0bed4090cfc70a0d18379cbaa9f076504b0c1d3b9cf2e3a425a2c3ca99bb"} Dec 11 10:53:29 crc kubenswrapper[5016]: I1211 10:53:29.444800 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f" Dec 11 10:53:29 crc kubenswrapper[5016]: I1211 10:53:29.446905 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vcscv" 
event={"ID":"4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19","Type":"ContainerStarted","Data":"e6e998a167e14ad9d934985d4f88974e77f6aa4ba68d2c5095b7321ee1553266"} Dec 11 10:53:29 crc kubenswrapper[5016]: I1211 10:53:29.447438 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vcscv" Dec 11 10:53:29 crc kubenswrapper[5016]: I1211 10:53:29.450116 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp" event={"ID":"b2cd783c-ef38-4478-9f86-60374f554bb2","Type":"ContainerStarted","Data":"b700dfec5f6e43a72ad356ad08fc2d6d08fe05c07d6bb6162f42928faeba8105"} Dec 11 10:53:29 crc kubenswrapper[5016]: E1211 10:53:29.454257 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:ccc60d56d8efc2e91a7d8a7131eb7e06c189c32247f2a819818c084ba2e2f2ab\\\"\"" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq" podUID="b44a8ea9-ba71-486d-9672-44146f09acb1" Dec 11 10:53:29 crc kubenswrapper[5016]: I1211 10:53:29.474715 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f" podStartSLOduration=4.21878245 podStartE2EDuration="55.474695201s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:36.116079815 +0000 UTC m=+1072.934639394" lastFinishedPulling="2025-12-11 10:53:27.371992566 +0000 UTC m=+1124.190552145" observedRunningTime="2025-12-11 10:53:29.469631207 +0000 UTC m=+1126.288190786" watchObservedRunningTime="2025-12-11 10:53:29.474695201 +0000 UTC m=+1126.293254800" Dec 11 10:53:29 crc kubenswrapper[5016]: I1211 10:53:29.498493 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vcscv" podStartSLOduration=4.111250724 podStartE2EDuration="55.498468825s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:35.983026592 +0000 UTC m=+1072.801586171" lastFinishedPulling="2025-12-11 10:53:27.370244703 +0000 UTC m=+1124.188804272" observedRunningTime="2025-12-11 10:53:29.4933758 +0000 UTC m=+1126.311935399" watchObservedRunningTime="2025-12-11 10:53:29.498468825 +0000 UTC m=+1126.317028404" Dec 11 10:53:29 crc kubenswrapper[5016]: I1211 10:53:29.536267 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp" podStartSLOduration=3.7690969450000003 podStartE2EDuration="55.536231652s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:52:35.615460249 +0000 UTC m=+1072.434019828" lastFinishedPulling="2025-12-11 10:53:27.382594956 +0000 UTC m=+1124.201154535" observedRunningTime="2025-12-11 10:53:29.534856948 +0000 UTC m=+1126.353416547" watchObservedRunningTime="2025-12-11 10:53:29.536231652 +0000 UTC m=+1126.354791241" Dec 11 10:53:30 crc kubenswrapper[5016]: I1211 10:53:30.457276 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp" Dec 11 10:53:34 crc kubenswrapper[5016]: I1211 10:53:34.610036 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-lx6dp" Dec 11 10:53:34 crc kubenswrapper[5016]: I1211 10:53:34.743601 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-vbw9f" Dec 11 10:53:34 crc kubenswrapper[5016]: I1211 10:53:34.794341 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-998648c74-vcscv" Dec 11 10:53:37 crc kubenswrapper[5016]: I1211 10:53:37.113313 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-595db99498-vmll2" Dec 11 10:53:39 crc kubenswrapper[5016]: I1211 10:53:39.530321 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" event={"ID":"dfea8003-afd2-45aa-bd7b-dcc5460e8a80","Type":"ContainerStarted","Data":"cbfbf8bbdb7db83cb95b7cd07a853ad95cded3dee763eaf2062cce5c86ece4cc"} Dec 11 10:53:39 crc kubenswrapper[5016]: I1211 10:53:39.531759 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" Dec 11 10:53:39 crc kubenswrapper[5016]: I1211 10:53:39.560594 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" podStartSLOduration=28.484327363 podStartE2EDuration="1m5.560574873s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:53:02.243268586 +0000 UTC m=+1099.061828165" lastFinishedPulling="2025-12-11 10:53:39.319516096 +0000 UTC m=+1136.138075675" observedRunningTime="2025-12-11 10:53:39.556283288 +0000 UTC m=+1136.374842887" watchObservedRunningTime="2025-12-11 10:53:39.560574873 +0000 UTC m=+1136.379134452" Dec 11 10:53:42 crc kubenswrapper[5016]: I1211 10:53:42.552549 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq" event={"ID":"b44a8ea9-ba71-486d-9672-44146f09acb1","Type":"ContainerStarted","Data":"a42531578adff74f69211893ad828d4902ef9cabcbeab2e3fa4babf0724635bf"} Dec 11 10:53:42 crc kubenswrapper[5016]: I1211 10:53:42.553392 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq" Dec 11 10:53:42 crc kubenswrapper[5016]: I1211 10:53:42.575033 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq" podStartSLOduration=28.737854714 podStartE2EDuration="1m8.5750113s" podCreationTimestamp="2025-12-11 10:52:34 +0000 UTC" firstStartedPulling="2025-12-11 10:53:02.402237436 +0000 UTC m=+1099.220797015" lastFinishedPulling="2025-12-11 10:53:42.239394022 +0000 UTC m=+1139.057953601" observedRunningTime="2025-12-11 10:53:42.57129384 +0000 UTC m=+1139.389853429" watchObservedRunningTime="2025-12-11 10:53:42.5750113 +0000 UTC m=+1139.393570899" Dec 11 10:53:50 crc kubenswrapper[5016]: I1211 10:53:50.446155 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-9s5rq" Dec 11 10:53:50 crc kubenswrapper[5016]: I1211 10:53:50.606503 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fbtnxw" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.589809 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-zzcnd"] Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.597430 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-zzcnd"] Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.597551 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.603018 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.603345 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-8v97c" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.603504 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.605381 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.658625 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-45wdk"] Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.660443 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.662498 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.671458 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-45wdk"] Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.733417 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85164e72-7f87-4203-9406-402e741c17d9-config\") pod \"dnsmasq-dns-675f4bcbfc-zzcnd\" (UID: \"85164e72-7f87-4203-9406-402e741c17d9\") " pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.733735 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94b49\" (UniqueName: \"kubernetes.io/projected/85164e72-7f87-4203-9406-402e741c17d9-kube-api-access-94b49\") pod \"dnsmasq-dns-675f4bcbfc-zzcnd\" (UID: \"85164e72-7f87-4203-9406-402e741c17d9\") " pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.835044 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ffd5160-7545-4860-b9d3-c61eea1c2e72-config\") pod \"dnsmasq-dns-78dd6ddcc-45wdk\" (UID: \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\") " pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.835160 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94b49\" (UniqueName: \"kubernetes.io/projected/85164e72-7f87-4203-9406-402e741c17d9-kube-api-access-94b49\") pod \"dnsmasq-dns-675f4bcbfc-zzcnd\" (UID: \"85164e72-7f87-4203-9406-402e741c17d9\") " pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" Dec 11 10:54:06 crc 
kubenswrapper[5016]: I1211 10:54:06.835204 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ffd5160-7545-4860-b9d3-c61eea1c2e72-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-45wdk\" (UID: \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\") " pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.835247 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67ltc\" (UniqueName: \"kubernetes.io/projected/2ffd5160-7545-4860-b9d3-c61eea1c2e72-kube-api-access-67ltc\") pod \"dnsmasq-dns-78dd6ddcc-45wdk\" (UID: \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\") " pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.835283 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85164e72-7f87-4203-9406-402e741c17d9-config\") pod \"dnsmasq-dns-675f4bcbfc-zzcnd\" (UID: \"85164e72-7f87-4203-9406-402e741c17d9\") " pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.837271 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85164e72-7f87-4203-9406-402e741c17d9-config\") pod \"dnsmasq-dns-675f4bcbfc-zzcnd\" (UID: \"85164e72-7f87-4203-9406-402e741c17d9\") " pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.856740 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94b49\" (UniqueName: \"kubernetes.io/projected/85164e72-7f87-4203-9406-402e741c17d9-kube-api-access-94b49\") pod \"dnsmasq-dns-675f4bcbfc-zzcnd\" (UID: \"85164e72-7f87-4203-9406-402e741c17d9\") " pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.924853 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.936518 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ffd5160-7545-4860-b9d3-c61eea1c2e72-config\") pod \"dnsmasq-dns-78dd6ddcc-45wdk\" (UID: \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\") " pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.936604 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ffd5160-7545-4860-b9d3-c61eea1c2e72-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-45wdk\" (UID: \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\") " pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.936701 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67ltc\" (UniqueName: \"kubernetes.io/projected/2ffd5160-7545-4860-b9d3-c61eea1c2e72-kube-api-access-67ltc\") pod \"dnsmasq-dns-78dd6ddcc-45wdk\" (UID: \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\") " pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.938334 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ffd5160-7545-4860-b9d3-c61eea1c2e72-config\") pod \"dnsmasq-dns-78dd6ddcc-45wdk\" (UID: \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\") " pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.939202 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ffd5160-7545-4860-b9d3-c61eea1c2e72-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-45wdk\" (UID: \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\") " pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.959825 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67ltc\" (UniqueName: \"kubernetes.io/projected/2ffd5160-7545-4860-b9d3-c61eea1c2e72-kube-api-access-67ltc\") pod \"dnsmasq-dns-78dd6ddcc-45wdk\" (UID: \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\") " pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:06 crc kubenswrapper[5016]: I1211 10:54:06.980564 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:08 crc kubenswrapper[5016]: I1211 10:54:07.552342 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-zzcnd"] Dec 11 10:54:08 crc kubenswrapper[5016]: I1211 10:54:07.632564 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-45wdk"] Dec 11 10:54:08 crc kubenswrapper[5016]: W1211 10:54:07.634462 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ffd5160_7545_4860_b9d3_c61eea1c2e72.slice/crio-58ef958da1698fb24fc353af1969e28a099ff9009e1fc9e5456d3be4ac3220ef WatchSource:0}: Error finding container 58ef958da1698fb24fc353af1969e28a099ff9009e1fc9e5456d3be4ac3220ef: Status 404 returned error can't find the container with id 58ef958da1698fb24fc353af1969e28a099ff9009e1fc9e5456d3be4ac3220ef Dec 11 10:54:08 crc kubenswrapper[5016]: I1211 10:54:07.769029 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" event={"ID":"85164e72-7f87-4203-9406-402e741c17d9","Type":"ContainerStarted","Data":"8702f4c61ce35003e8fbaae64d03e5a4dc5d3f1db0a45f0516d863d45003ec44"} Dec 11 10:54:08 crc kubenswrapper[5016]: I1211 10:54:07.770010 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" event={"ID":"2ffd5160-7545-4860-b9d3-c61eea1c2e72","Type":"ContainerStarted","Data":"58ef958da1698fb24fc353af1969e28a099ff9009e1fc9e5456d3be4ac3220ef"} Dec 11 10:54:09 crc kubenswrapper[5016]: I1211 10:54:09.915617 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-zzcnd"] Dec 11 10:54:09 crc kubenswrapper[5016]: I1211 10:54:09.936270 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-q2mbw"] Dec 11 10:54:09 crc kubenswrapper[5016]: I1211 10:54:09.937804 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:54:09 crc kubenswrapper[5016]: I1211 10:54:09.946403 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-q2mbw"] Dec 11 10:54:09 crc kubenswrapper[5016]: I1211 10:54:09.995748 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vssv4\" (UniqueName: \"kubernetes.io/projected/feb4fb10-1a2c-4e86-bc14-c75750265150-kube-api-access-vssv4\") pod \"dnsmasq-dns-5ccc8479f9-q2mbw\" (UID: \"feb4fb10-1a2c-4e86-bc14-c75750265150\") " pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:54:09 crc kubenswrapper[5016]: I1211 10:54:09.995810 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/feb4fb10-1a2c-4e86-bc14-c75750265150-config\") pod \"dnsmasq-dns-5ccc8479f9-q2mbw\" (UID: \"feb4fb10-1a2c-4e86-bc14-c75750265150\") " pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:54:09 crc kubenswrapper[5016]: I1211 10:54:09.995923 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/feb4fb10-1a2c-4e86-bc14-c75750265150-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-q2mbw\" (UID: \"feb4fb10-1a2c-4e86-bc14-c75750265150\") " pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.096931 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/feb4fb10-1a2c-4e86-bc14-c75750265150-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-q2mbw\" (UID: \"feb4fb10-1a2c-4e86-bc14-c75750265150\") " pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.097025 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vssv4\" (UniqueName: \"kubernetes.io/projected/feb4fb10-1a2c-4e86-bc14-c75750265150-kube-api-access-vssv4\") pod \"dnsmasq-dns-5ccc8479f9-q2mbw\" (UID: \"feb4fb10-1a2c-4e86-bc14-c75750265150\") " pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.097060 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/feb4fb10-1a2c-4e86-bc14-c75750265150-config\") pod \"dnsmasq-dns-5ccc8479f9-q2mbw\" (UID: \"feb4fb10-1a2c-4e86-bc14-c75750265150\") " pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.099929 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/feb4fb10-1a2c-4e86-bc14-c75750265150-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-q2mbw\" (UID: \"feb4fb10-1a2c-4e86-bc14-c75750265150\") " pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.100416 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/feb4fb10-1a2c-4e86-bc14-c75750265150-config\") pod \"dnsmasq-dns-5ccc8479f9-q2mbw\" (UID: \"feb4fb10-1a2c-4e86-bc14-c75750265150\") " pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.154128 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vssv4\" (UniqueName: 
\"kubernetes.io/projected/feb4fb10-1a2c-4e86-bc14-c75750265150-kube-api-access-vssv4\") pod \"dnsmasq-dns-5ccc8479f9-q2mbw\" (UID: \"feb4fb10-1a2c-4e86-bc14-c75750265150\") " pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.277989 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.438985 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-45wdk"] Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.475689 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-m87sq"] Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.477056 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.486885 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-m87sq"] Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.516588 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-m87sq\" (UID: \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\") " pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.516677 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9qhj\" (UniqueName: \"kubernetes.io/projected/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-kube-api-access-q9qhj\") pod \"dnsmasq-dns-57d769cc4f-m87sq\" (UID: \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\") " pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.516711 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-config\") pod \"dnsmasq-dns-57d769cc4f-m87sq\" (UID: \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\") " pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.618321 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-m87sq\" (UID: \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\") " pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.618393 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9qhj\" (UniqueName: \"kubernetes.io/projected/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-kube-api-access-q9qhj\") pod \"dnsmasq-dns-57d769cc4f-m87sq\" (UID: \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\") " pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.618434 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-config\") pod \"dnsmasq-dns-57d769cc4f-m87sq\" (UID: \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\") " pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.619601 5016 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-m87sq\" (UID: \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\") " pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.620016 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-config\") pod \"dnsmasq-dns-57d769cc4f-m87sq\" (UID: \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\") " pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.667727 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9qhj\" (UniqueName: \"kubernetes.io/projected/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-kube-api-access-q9qhj\") pod \"dnsmasq-dns-57d769cc4f-m87sq\" (UID: \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\") " pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.818201 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:54:10 crc kubenswrapper[5016]: I1211 10:54:10.866392 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-q2mbw"] Dec 11 10:54:10 crc kubenswrapper[5016]: W1211 10:54:10.893297 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfeb4fb10_1a2c_4e86_bc14_c75750265150.slice/crio-8650effc27e07562d7dcdf523b773e21526599fe01ee414b10fcf64bbbf595bb WatchSource:0}: Error finding container 8650effc27e07562d7dcdf523b773e21526599fe01ee414b10fcf64bbbf595bb: Status 404 returned error can't find the container with id 8650effc27e07562d7dcdf523b773e21526599fe01ee414b10fcf64bbbf595bb Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.186148 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.187565 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.189772 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.190042 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.190674 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.191094 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.191351 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.191957 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-t7pn9" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.192191 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.213061 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.235103 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.235222 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.337297 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.337359 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.337395 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.337517 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.337579 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.337611 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.337655 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/313107c9-4bb0-49ad-a67b-7f2e4ae09753-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.338510 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.339250 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/313107c9-4bb0-49ad-a67b-7f2e4ae09753-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.339621 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.339747 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.339818 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqp4d\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-kube-api-access-tqp4d\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.340239 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.369790 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.398429 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-m87sq"] Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.441589 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.446152 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.446235 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.446406 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.446447 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.446507 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/313107c9-4bb0-49ad-a67b-7f2e4ae09753-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.446595 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/313107c9-4bb0-49ad-a67b-7f2e4ae09753-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.446630 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" 
(UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.446763 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqp4d\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-kube-api-access-tqp4d\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.445865 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.447806 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.448022 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.448804 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.454197 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.454220 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.458027 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/313107c9-4bb0-49ad-a67b-7f2e4ae09753-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.460631 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/313107c9-4bb0-49ad-a67b-7f2e4ae09753-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " 
pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.474802 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqp4d\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-kube-api-access-tqp4d\") pod \"rabbitmq-cell1-server-0\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.538605 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.633008 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.634704 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.641457 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.646807 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.647014 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.647201 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.647339 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-92vt9" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.647489 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.647655 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.664480 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.763112 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.763179 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-pod-info\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.763210 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.763237 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.763262 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.763309 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.763332 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-server-conf\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.763366 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.763391 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.763479 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wc4qd\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-kube-api-access-wc4qd\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.763598 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-config-data\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.864797 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.864863 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-server-conf\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.864902 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.864919 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.864962 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wc4qd\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-kube-api-access-wc4qd\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.865021 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-config-data\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.865074 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.865114 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-pod-info\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.865132 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.865149 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.865204 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.866326 5016 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.867300 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.867576 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.867688 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-config-data\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.867899 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.869404 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.878512 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.880449 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-pod-info\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.930409 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.933984 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" event={"ID":"124b25d9-0d06-4689-8612-4f8b8ca3b0e6","Type":"ContainerStarted","Data":"c6852511e6465fb819fb5d2ad1bc2827b4d74f2f7130bea8471a5f9512db5a6b"} Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 
10:54:11.934883 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" event={"ID":"feb4fb10-1a2c-4e86-bc14-c75750265150","Type":"ContainerStarted","Data":"8650effc27e07562d7dcdf523b773e21526599fe01ee414b10fcf64bbbf595bb"} Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.936769 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-server-conf\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.953262 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wc4qd\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-kube-api-access-wc4qd\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:11 crc kubenswrapper[5016]: I1211 10:54:11.982023 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " pod="openstack/rabbitmq-server-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.286065 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.350991 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 10:54:12 crc kubenswrapper[5016]: W1211 10:54:12.378957 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod313107c9_4bb0_49ad_a67b_7f2e4ae09753.slice/crio-228a9b2d1092b574ec0f1d31270adf9b9790ab5d10c878fe75f0ba9b867ffff5 WatchSource:0}: Error finding container 228a9b2d1092b574ec0f1d31270adf9b9790ab5d10c878fe75f0ba9b867ffff5: Status 404 returned error can't find the container with id 228a9b2d1092b574ec0f1d31270adf9b9790ab5d10c878fe75f0ba9b867ffff5 Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.666102 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.671763 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.674917 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-vqswg" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.675179 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.675505 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.677103 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.684889 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.686830 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.779811 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-kolla-config\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.779875 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-config-data-default\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.779929 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.780012 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.780031 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.780064 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qg47\" (UniqueName: \"kubernetes.io/projected/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-kube-api-access-8qg47\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.780094 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.780112 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.884361 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.884415 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.884454 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-kolla-config\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.884680 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-config-data-default\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.884750 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.884793 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.884821 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.884865 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qg47\" (UniqueName: \"kubernetes.io/projected/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-kube-api-access-8qg47\") pod \"openstack-galera-0\" (UID: 
\"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.891401 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.892570 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.892584 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-kolla-config\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.896134 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.896631 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-config-data-default\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.901245 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.906640 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.930975 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.937214 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qg47\" (UniqueName: \"kubernetes.io/projected/ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd-kube-api-access-8qg47\") pod \"openstack-galera-0\" (UID: \"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd\") " pod="openstack/openstack-galera-0" Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.957034 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"313107c9-4bb0-49ad-a67b-7f2e4ae09753","Type":"ContainerStarted","Data":"228a9b2d1092b574ec0f1d31270adf9b9790ab5d10c878fe75f0ba9b867ffff5"} Dec 11 10:54:12 crc kubenswrapper[5016]: I1211 10:54:12.969120 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 10:54:13 crc kubenswrapper[5016]: W1211 10:54:13.001468 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode46a21b8_75eb_49ac_8d08_0acaaa8fac37.slice/crio-fc67a03497d7436736f45657f91ac05cb4cef827178fa748edab2439c225cd75 WatchSource:0}: Error finding container fc67a03497d7436736f45657f91ac05cb4cef827178fa748edab2439c225cd75: Status 404 returned error can't find the container with id fc67a03497d7436736f45657f91ac05cb4cef827178fa748edab2439c225cd75 Dec 11 10:54:13 crc kubenswrapper[5016]: I1211 10:54:13.013663 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 11 10:54:13 crc kubenswrapper[5016]: I1211 10:54:13.922581 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 11 10:54:13 crc kubenswrapper[5016]: I1211 10:54:13.980136 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd","Type":"ContainerStarted","Data":"3bf18b36568e2cf79b9a0094353bd26630e9b1574406b51fa3cc979d568aad02"} Dec 11 10:54:13 crc kubenswrapper[5016]: I1211 10:54:13.982659 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e46a21b8-75eb-49ac-8d08-0acaaa8fac37","Type":"ContainerStarted","Data":"fc67a03497d7436736f45657f91ac05cb4cef827178fa748edab2439c225cd75"} Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.211576 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.243410 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.243533 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.249780 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.249866 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.249906 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.250110 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-97l5z" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.324279 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wzgj\" (UniqueName: \"kubernetes.io/projected/be590587-03d9-4391-98b3-bacb7432ec51-kube-api-access-2wzgj\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.324337 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/be590587-03d9-4391-98b3-bacb7432ec51-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.324370 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/be590587-03d9-4391-98b3-bacb7432ec51-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.324404 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be590587-03d9-4391-98b3-bacb7432ec51-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.324454 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.324485 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be590587-03d9-4391-98b3-bacb7432ec51-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.324509 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/be590587-03d9-4391-98b3-bacb7432ec51-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " 
pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.324535 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/be590587-03d9-4391-98b3-bacb7432ec51-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.397287 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.399901 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.406228 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.406298 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.416329 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-gg4r9" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.424228 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.425622 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be590587-03d9-4391-98b3-bacb7432ec51-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.425677 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/be590587-03d9-4391-98b3-bacb7432ec51-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.425714 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/be590587-03d9-4391-98b3-bacb7432ec51-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.425759 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.425798 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-kolla-config\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.425823 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wzgj\" (UniqueName: 
\"kubernetes.io/projected/be590587-03d9-4391-98b3-bacb7432ec51-kube-api-access-2wzgj\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.425845 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hr8fb\" (UniqueName: \"kubernetes.io/projected/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-kube-api-access-hr8fb\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.425871 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/be590587-03d9-4391-98b3-bacb7432ec51-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.425896 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/be590587-03d9-4391-98b3-bacb7432ec51-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.425918 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-config-data\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.425965 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be590587-03d9-4391-98b3-bacb7432ec51-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.425995 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.426042 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.426437 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.426987 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/be590587-03d9-4391-98b3-bacb7432ec51-config-data-generated\") pod \"openstack-cell1-galera-0\" 
(UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.428616 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/be590587-03d9-4391-98b3-bacb7432ec51-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.428727 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/be590587-03d9-4391-98b3-bacb7432ec51-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.429804 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be590587-03d9-4391-98b3-bacb7432ec51-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.433460 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/be590587-03d9-4391-98b3-bacb7432ec51-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.434447 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be590587-03d9-4391-98b3-bacb7432ec51-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.474470 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wzgj\" (UniqueName: \"kubernetes.io/projected/be590587-03d9-4391-98b3-bacb7432ec51-kube-api-access-2wzgj\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.478532 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-cell1-galera-0\" (UID: \"be590587-03d9-4391-98b3-bacb7432ec51\") " pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.527039 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.527103 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-kolla-config\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.527127 5016 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-hr8fb\" (UniqueName: \"kubernetes.io/projected/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-kube-api-access-hr8fb\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.527151 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-config-data\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.527184 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.530643 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.532862 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.533540 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-kolla-config\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.545247 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-config-data\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.580702 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hr8fb\" (UniqueName: \"kubernetes.io/projected/f0588b90-e0f3-49e1-9ff9-76e8aac23b93-kube-api-access-hr8fb\") pod \"memcached-0\" (UID: \"f0588b90-e0f3-49e1-9ff9-76e8aac23b93\") " pod="openstack/memcached-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.596470 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 11 10:54:14 crc kubenswrapper[5016]: I1211 10:54:14.753313 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Dec 11 10:54:15 crc kubenswrapper[5016]: I1211 10:54:15.289049 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 11 10:54:15 crc kubenswrapper[5016]: W1211 10:54:15.333299 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe590587_03d9_4391_98b3_bacb7432ec51.slice/crio-5d1fda2f21aeb6f1825f9f61da319bf24326de0e6411e62bd98c1592fcdcefdc WatchSource:0}: Error finding container 5d1fda2f21aeb6f1825f9f61da319bf24326de0e6411e62bd98c1592fcdcefdc: Status 404 returned error can't find the container with id 5d1fda2f21aeb6f1825f9f61da319bf24326de0e6411e62bd98c1592fcdcefdc Dec 11 10:54:15 crc kubenswrapper[5016]: I1211 10:54:15.699582 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 11 10:54:15 crc kubenswrapper[5016]: W1211 10:54:15.748879 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf0588b90_e0f3_49e1_9ff9_76e8aac23b93.slice/crio-e9a6fcb9b313ee18c50ab9279dd2723784037b51a643409e29dbfe2ffa1ba2d2 WatchSource:0}: Error finding container e9a6fcb9b313ee18c50ab9279dd2723784037b51a643409e29dbfe2ffa1ba2d2: Status 404 returned error can't find the container with id e9a6fcb9b313ee18c50ab9279dd2723784037b51a643409e29dbfe2ffa1ba2d2 Dec 11 10:54:16 crc kubenswrapper[5016]: I1211 10:54:16.029731 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"be590587-03d9-4391-98b3-bacb7432ec51","Type":"ContainerStarted","Data":"5d1fda2f21aeb6f1825f9f61da319bf24326de0e6411e62bd98c1592fcdcefdc"} Dec 11 10:54:16 crc kubenswrapper[5016]: I1211 10:54:16.033385 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f0588b90-e0f3-49e1-9ff9-76e8aac23b93","Type":"ContainerStarted","Data":"e9a6fcb9b313ee18c50ab9279dd2723784037b51a643409e29dbfe2ffa1ba2d2"} Dec 11 10:54:16 crc kubenswrapper[5016]: I1211 10:54:16.409958 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 11 10:54:16 crc kubenswrapper[5016]: I1211 10:54:16.411134 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 11 10:54:16 crc kubenswrapper[5016]: I1211 10:54:16.420543 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 11 10:54:16 crc kubenswrapper[5016]: I1211 10:54:16.424965 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-blc9l" Dec 11 10:54:16 crc kubenswrapper[5016]: I1211 10:54:16.499868 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pjbh\" (UniqueName: \"kubernetes.io/projected/070a458f-4698-4701-903e-a64d4dc7d95a-kube-api-access-4pjbh\") pod \"kube-state-metrics-0\" (UID: \"070a458f-4698-4701-903e-a64d4dc7d95a\") " pod="openstack/kube-state-metrics-0" Dec 11 10:54:16 crc kubenswrapper[5016]: I1211 10:54:16.602848 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pjbh\" (UniqueName: \"kubernetes.io/projected/070a458f-4698-4701-903e-a64d4dc7d95a-kube-api-access-4pjbh\") pod \"kube-state-metrics-0\" (UID: \"070a458f-4698-4701-903e-a64d4dc7d95a\") " pod="openstack/kube-state-metrics-0" Dec 11 10:54:16 crc kubenswrapper[5016]: I1211 10:54:16.632848 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pjbh\" (UniqueName: \"kubernetes.io/projected/070a458f-4698-4701-903e-a64d4dc7d95a-kube-api-access-4pjbh\") pod \"kube-state-metrics-0\" (UID: \"070a458f-4698-4701-903e-a64d4dc7d95a\") " pod="openstack/kube-state-metrics-0" Dec 11 10:54:16 crc kubenswrapper[5016]: I1211 10:54:16.758371 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 11 10:54:17 crc kubenswrapper[5016]: I1211 10:54:17.610501 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.206150 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-g76kk"] Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.208709 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.213171 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.213576 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-m5jnq" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.213910 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.229180 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-g76kk"] Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.317535 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-scripts\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.317619 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvd7n\" (UniqueName: \"kubernetes.io/projected/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-kube-api-access-zvd7n\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.317690 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-var-log-ovn\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.317726 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-combined-ca-bundle\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.317784 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-var-run-ovn\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.317842 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-var-run\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.317886 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-ovn-controller-tls-certs\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.332684 5016 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-vbtwd"] Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.335665 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.349385 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-vbtwd"] Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.419552 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-combined-ca-bundle\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.419650 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-var-run-ovn\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.419709 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-var-run\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.419754 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-ovn-controller-tls-certs\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.419800 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-scripts\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.419830 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvd7n\" (UniqueName: \"kubernetes.io/projected/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-kube-api-access-zvd7n\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.419881 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-var-log-ovn\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.420357 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-var-run\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.420619 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-var-log-ovn\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.420690 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-var-run-ovn\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.423553 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-scripts\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.428815 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-ovn-controller-tls-certs\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.439833 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvd7n\" (UniqueName: \"kubernetes.io/projected/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-kube-api-access-zvd7n\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.440814 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf0694a8-c7ff-429f-a52f-5885a8dcb3ac-combined-ca-bundle\") pod \"ovn-controller-g76kk\" (UID: \"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac\") " pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.521179 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/4f718adb-56cf-4983-bd6a-e750e06edad7-etc-ovs\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.521244 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4f718adb-56cf-4983-bd6a-e750e06edad7-var-run\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.521406 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4f718adb-56cf-4983-bd6a-e750e06edad7-scripts\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.521476 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/4f718adb-56cf-4983-bd6a-e750e06edad7-var-log\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " 
pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.521509 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/4f718adb-56cf-4983-bd6a-e750e06edad7-var-lib\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.521573 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4hdl\" (UniqueName: \"kubernetes.io/projected/4f718adb-56cf-4983-bd6a-e750e06edad7-kube-api-access-h4hdl\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.559655 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-g76kk" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.623617 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/4f718adb-56cf-4983-bd6a-e750e06edad7-etc-ovs\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.623676 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4f718adb-56cf-4983-bd6a-e750e06edad7-var-run\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.623836 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4f718adb-56cf-4983-bd6a-e750e06edad7-scripts\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.623921 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/4f718adb-56cf-4983-bd6a-e750e06edad7-var-log\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.623993 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/4f718adb-56cf-4983-bd6a-e750e06edad7-var-lib\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.624075 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4hdl\" (UniqueName: \"kubernetes.io/projected/4f718adb-56cf-4983-bd6a-e750e06edad7-kube-api-access-h4hdl\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.625198 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/4f718adb-56cf-4983-bd6a-e750e06edad7-etc-ovs\") pod \"ovn-controller-ovs-vbtwd\" (UID: 
\"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.625291 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4f718adb-56cf-4983-bd6a-e750e06edad7-var-run\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.628997 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4f718adb-56cf-4983-bd6a-e750e06edad7-scripts\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.630562 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/4f718adb-56cf-4983-bd6a-e750e06edad7-var-lib\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.630690 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/4f718adb-56cf-4983-bd6a-e750e06edad7-var-log\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.665833 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4hdl\" (UniqueName: \"kubernetes.io/projected/4f718adb-56cf-4983-bd6a-e750e06edad7-kube-api-access-h4hdl\") pod \"ovn-controller-ovs-vbtwd\" (UID: \"4f718adb-56cf-4983-bd6a-e750e06edad7\") " pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.671997 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.705923 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.709376 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.716720 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-pcz7v" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.717197 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.718164 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.718268 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.718399 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.734117 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.830231 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/65ac3f0e-4016-4586-b742-2c52252ed51b-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.830398 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65ac3f0e-4016-4586-b742-2c52252ed51b-config\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.830434 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/65ac3f0e-4016-4586-b742-2c52252ed51b-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.830494 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/65ac3f0e-4016-4586-b742-2c52252ed51b-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.830516 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vwcj\" (UniqueName: \"kubernetes.io/projected/65ac3f0e-4016-4586-b742-2c52252ed51b-kube-api-access-6vwcj\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.830568 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/65ac3f0e-4016-4586-b742-2c52252ed51b-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.830590 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65ac3f0e-4016-4586-b742-2c52252ed51b-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.830619 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.932924 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/65ac3f0e-4016-4586-b742-2c52252ed51b-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.933016 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65ac3f0e-4016-4586-b742-2c52252ed51b-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.933052 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.933130 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/65ac3f0e-4016-4586-b742-2c52252ed51b-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.933164 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65ac3f0e-4016-4586-b742-2c52252ed51b-config\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.933192 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/65ac3f0e-4016-4586-b742-2c52252ed51b-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.933241 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/65ac3f0e-4016-4586-b742-2c52252ed51b-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.933258 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vwcj\" (UniqueName: \"kubernetes.io/projected/65ac3f0e-4016-4586-b742-2c52252ed51b-kube-api-access-6vwcj\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 
10:54:20.935749 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.935854 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/65ac3f0e-4016-4586-b742-2c52252ed51b-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.936545 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/65ac3f0e-4016-4586-b742-2c52252ed51b-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.939301 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65ac3f0e-4016-4586-b742-2c52252ed51b-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.939651 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65ac3f0e-4016-4586-b742-2c52252ed51b-config\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.948960 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/65ac3f0e-4016-4586-b742-2c52252ed51b-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.961000 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vwcj\" (UniqueName: \"kubernetes.io/projected/65ac3f0e-4016-4586-b742-2c52252ed51b-kube-api-access-6vwcj\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.969773 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/65ac3f0e-4016-4586-b742-2c52252ed51b-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:20 crc kubenswrapper[5016]: I1211 10:54:20.973125 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-nb-0\" (UID: \"65ac3f0e-4016-4586-b742-2c52252ed51b\") " pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:21 crc kubenswrapper[5016]: I1211 10:54:21.056080 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.580354 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.584660 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.594224 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-nh7pw" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.594536 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.598235 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.599895 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.608455 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.710069 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.710350 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.710481 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47b9r\" (UniqueName: \"kubernetes.io/projected/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-kube-api-access-47b9r\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.710655 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.710798 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.710881 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 
crc kubenswrapper[5016]: I1211 10:54:23.711014 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.711258 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-config\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.813088 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.813438 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.813634 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47b9r\" (UniqueName: \"kubernetes.io/projected/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-kube-api-access-47b9r\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.813755 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.813905 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.814018 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.814097 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.814196 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.814338 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-config\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.814262 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.815235 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.815529 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-config\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.820611 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.820692 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.821367 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.856925 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47b9r\" (UniqueName: \"kubernetes.io/projected/de7b514e-0bc7-4260-9bc4-9c0f1b13562b-kube-api-access-47b9r\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.858999 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"de7b514e-0bc7-4260-9bc4-9c0f1b13562b\") " pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:23 crc kubenswrapper[5016]: I1211 10:54:23.922510 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 11 10:54:24 crc kubenswrapper[5016]: W1211 10:54:24.305213 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod070a458f_4698_4701_903e_a64d4dc7d95a.slice/crio-840a15d932c99263407454fa1d2e650027c5974ec646b004fdeb57063524b41a WatchSource:0}: Error finding container 840a15d932c99263407454fa1d2e650027c5974ec646b004fdeb57063524b41a: Status 404 returned error can't find the container with id 840a15d932c99263407454fa1d2e650027c5974ec646b004fdeb57063524b41a Dec 11 10:54:25 crc kubenswrapper[5016]: I1211 10:54:25.208545 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"070a458f-4698-4701-903e-a64d4dc7d95a","Type":"ContainerStarted","Data":"840a15d932c99263407454fa1d2e650027c5974ec646b004fdeb57063524b41a"} Dec 11 10:54:41 crc kubenswrapper[5016]: E1211 10:54:41.973999 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Dec 11 10:54:41 crc kubenswrapper[5016]: E1211 10:54:41.975440 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wc4qd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(e46a21b8-75eb-49ac-8d08-0acaaa8fac37): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:54:41 crc kubenswrapper[5016]: E1211 10:54:41.977385 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="e46a21b8-75eb-49ac-8d08-0acaaa8fac37" Dec 11 10:54:42 crc kubenswrapper[5016]: E1211 10:54:42.345181 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="e46a21b8-75eb-49ac-8d08-0acaaa8fac37" Dec 11 10:54:43 crc kubenswrapper[5016]: E1211 10:54:43.899240 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Dec 11 10:54:43 crc kubenswrapper[5016]: E1211 10:54:43.899493 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8qg47,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:54:43 crc kubenswrapper[5016]: E1211 10:54:43.900721 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd" Dec 11 10:54:44 crc kubenswrapper[5016]: E1211 10:54:44.358364 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd" Dec 11 10:54:44 crc kubenswrapper[5016]: E1211 10:54:44.591300 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Dec 11 10:54:44 crc kubenswrapper[5016]: E1211 10:54:44.591507 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- 
/usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n695h564h5cfh589h689h568h5f7h79h78h545h67fh68bh88h8bh5b7h7h5ffh9ch579h697h89h5d6hcbh689h5fh5dbhffh668hbdh54bh5fchbq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hr8fb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(f0588b90-e0f3-49e1-9ff9-76e8aac23b93): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:54:44 crc kubenswrapper[5016]: E1211 10:54:44.592849 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="f0588b90-e0f3-49e1-9ff9-76e8aac23b93" Dec 11 10:54:45 crc kubenswrapper[5016]: E1211 10:54:45.366877 5016 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="f0588b90-e0f3-49e1-9ff9-76e8aac23b93" Dec 11 10:54:52 crc kubenswrapper[5016]: E1211 10:54:52.449846 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Dec 11 10:54:52 crc kubenswrapper[5016]: E1211 10:54:52.450780 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tqp4d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(313107c9-4bb0-49ad-a67b-7f2e4ae09753): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:54:52 crc kubenswrapper[5016]: E1211 10:54:52.451989 5016 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="313107c9-4bb0-49ad-a67b-7f2e4ae09753" Dec 11 10:54:52 crc kubenswrapper[5016]: E1211 10:54:52.459244 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Dec 11 10:54:52 crc kubenswrapper[5016]: E1211 10:54:52.459475 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2wzgj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(be590587-03d9-4391-98b3-bacb7432ec51): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:54:52 crc kubenswrapper[5016]: E1211 10:54:52.461560 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="be590587-03d9-4391-98b3-bacb7432ec51" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.425878 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="313107c9-4bb0-49ad-a67b-7f2e4ae09753" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.426594 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="be590587-03d9-4391-98b3-bacb7432ec51" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.522511 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.522741 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-94b49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-zzcnd_openstack(85164e72-7f87-4203-9406-402e741c17d9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.524390 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" podUID="85164e72-7f87-4203-9406-402e741c17d9" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.565566 5016 log.go:32] "PullImage from image service 
failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.566692 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfdh5dfhb6h64h676hc4h78h97h669h54chfbh696hb5h54bh5d4h6bh64h644h677h584h5cbh698h9dh5bbh5f8h5b8hcdh644h5c7h694hbfh589q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vssv4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5ccc8479f9-q2mbw_openstack(feb4fb10-1a2c-4e86-bc14-c75750265150): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.568293 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" podUID="feb4fb10-1a2c-4e86-bc14-c75750265150" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.582364 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.582605 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts 
--keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q9qhj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-m87sq_openstack(124b25d9-0d06-4689-8612-4f8b8ca3b0e6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.584031 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" podUID="124b25d9-0d06-4689-8612-4f8b8ca3b0e6" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.593049 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.593186 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-67ltc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-45wdk_openstack(2ffd5160-7545-4860-b9d3-c61eea1c2e72): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:54:53 crc kubenswrapper[5016]: E1211 10:54:53.594316 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" podUID="2ffd5160-7545-4860-b9d3-c61eea1c2e72" Dec 11 10:54:53 crc kubenswrapper[5016]: I1211 10:54:53.825877 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-g76kk"] Dec 11 10:54:53 crc kubenswrapper[5016]: W1211 10:54:53.911789 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf0694a8_c7ff_429f_a52f_5885a8dcb3ac.slice/crio-34d977e49cbcced03af915e764bc99093cbd2143c80e3ad653ab4f5bd5137cbd WatchSource:0}: Error finding container 34d977e49cbcced03af915e764bc99093cbd2143c80e3ad653ab4f5bd5137cbd: Status 404 returned error can't find the container with id 34d977e49cbcced03af915e764bc99093cbd2143c80e3ad653ab4f5bd5137cbd Dec 11 10:54:53 crc kubenswrapper[5016]: I1211 10:54:53.973796 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 11 10:54:54 crc kubenswrapper[5016]: I1211 10:54:54.075849 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 11 10:54:54 crc kubenswrapper[5016]: W1211 10:54:54.193283 5016 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde7b514e_0bc7_4260_9bc4_9c0f1b13562b.slice/crio-dfd34e4bf7369a8698d73e5b2dfa02b105e02a1d8bbc163599e5ef81d5e717a3 WatchSource:0}: Error finding container dfd34e4bf7369a8698d73e5b2dfa02b105e02a1d8bbc163599e5ef81d5e717a3: Status 404 returned error can't find the container with id dfd34e4bf7369a8698d73e5b2dfa02b105e02a1d8bbc163599e5ef81d5e717a3 Dec 11 10:54:54 crc kubenswrapper[5016]: I1211 10:54:54.432659 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-g76kk" event={"ID":"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac","Type":"ContainerStarted","Data":"34d977e49cbcced03af915e764bc99093cbd2143c80e3ad653ab4f5bd5137cbd"} Dec 11 10:54:54 crc kubenswrapper[5016]: I1211 10:54:54.433921 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"65ac3f0e-4016-4586-b742-2c52252ed51b","Type":"ContainerStarted","Data":"02cdb9b83ed26c2b46960907ad2c98b43840c0f9a38de50f833af626f6af1ca0"} Dec 11 10:54:54 crc kubenswrapper[5016]: I1211 10:54:54.436171 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"de7b514e-0bc7-4260-9bc4-9c0f1b13562b","Type":"ContainerStarted","Data":"dfd34e4bf7369a8698d73e5b2dfa02b105e02a1d8bbc163599e5ef81d5e717a3"} Dec 11 10:54:54 crc kubenswrapper[5016]: E1211 10:54:54.438021 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" podUID="124b25d9-0d06-4689-8612-4f8b8ca3b0e6" Dec 11 10:54:54 crc kubenswrapper[5016]: E1211 10:54:54.438708 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" podUID="feb4fb10-1a2c-4e86-bc14-c75750265150" Dec 11 10:54:54 crc kubenswrapper[5016]: I1211 10:54:54.672166 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-vbtwd"] Dec 11 10:54:54 crc kubenswrapper[5016]: I1211 10:54:54.936956 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" Dec 11 10:54:54 crc kubenswrapper[5016]: I1211 10:54:54.944595 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.059606 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85164e72-7f87-4203-9406-402e741c17d9-config\") pod \"85164e72-7f87-4203-9406-402e741c17d9\" (UID: \"85164e72-7f87-4203-9406-402e741c17d9\") " Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.059657 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94b49\" (UniqueName: \"kubernetes.io/projected/85164e72-7f87-4203-9406-402e741c17d9-kube-api-access-94b49\") pod \"85164e72-7f87-4203-9406-402e741c17d9\" (UID: \"85164e72-7f87-4203-9406-402e741c17d9\") " Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.059726 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ffd5160-7545-4860-b9d3-c61eea1c2e72-config\") pod \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\" (UID: \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\") " Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.059794 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67ltc\" (UniqueName: \"kubernetes.io/projected/2ffd5160-7545-4860-b9d3-c61eea1c2e72-kube-api-access-67ltc\") pod \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\" (UID: \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\") " Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.059813 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ffd5160-7545-4860-b9d3-c61eea1c2e72-dns-svc\") pod \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\" (UID: \"2ffd5160-7545-4860-b9d3-c61eea1c2e72\") " Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.060457 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85164e72-7f87-4203-9406-402e741c17d9-config" (OuterVolumeSpecName: "config") pod "85164e72-7f87-4203-9406-402e741c17d9" (UID: "85164e72-7f87-4203-9406-402e741c17d9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.060619 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ffd5160-7545-4860-b9d3-c61eea1c2e72-config" (OuterVolumeSpecName: "config") pod "2ffd5160-7545-4860-b9d3-c61eea1c2e72" (UID: "2ffd5160-7545-4860-b9d3-c61eea1c2e72"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.060639 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ffd5160-7545-4860-b9d3-c61eea1c2e72-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2ffd5160-7545-4860-b9d3-c61eea1c2e72" (UID: "2ffd5160-7545-4860-b9d3-c61eea1c2e72"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.060924 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85164e72-7f87-4203-9406-402e741c17d9-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.060961 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ffd5160-7545-4860-b9d3-c61eea1c2e72-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.060977 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ffd5160-7545-4860-b9d3-c61eea1c2e72-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.066362 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85164e72-7f87-4203-9406-402e741c17d9-kube-api-access-94b49" (OuterVolumeSpecName: "kube-api-access-94b49") pod "85164e72-7f87-4203-9406-402e741c17d9" (UID: "85164e72-7f87-4203-9406-402e741c17d9"). InnerVolumeSpecName "kube-api-access-94b49". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.066683 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ffd5160-7545-4860-b9d3-c61eea1c2e72-kube-api-access-67ltc" (OuterVolumeSpecName: "kube-api-access-67ltc") pod "2ffd5160-7545-4860-b9d3-c61eea1c2e72" (UID: "2ffd5160-7545-4860-b9d3-c61eea1c2e72"). InnerVolumeSpecName "kube-api-access-67ltc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.162626 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67ltc\" (UniqueName: \"kubernetes.io/projected/2ffd5160-7545-4860-b9d3-c61eea1c2e72-kube-api-access-67ltc\") on node \"crc\" DevicePath \"\"" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.162670 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94b49\" (UniqueName: \"kubernetes.io/projected/85164e72-7f87-4203-9406-402e741c17d9-kube-api-access-94b49\") on node \"crc\" DevicePath \"\"" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.445363 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.445367 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-45wdk" event={"ID":"2ffd5160-7545-4860-b9d3-c61eea1c2e72","Type":"ContainerDied","Data":"58ef958da1698fb24fc353af1969e28a099ff9009e1fc9e5456d3be4ac3220ef"} Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.446774 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" event={"ID":"85164e72-7f87-4203-9406-402e741c17d9","Type":"ContainerDied","Data":"8702f4c61ce35003e8fbaae64d03e5a4dc5d3f1db0a45f0516d863d45003ec44"} Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.446854 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-zzcnd" Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.457177 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vbtwd" event={"ID":"4f718adb-56cf-4983-bd6a-e750e06edad7","Type":"ContainerStarted","Data":"c975f96aca68bc4fd7a3cd310fcf76ff83bece3024500447d7ad6b7e698a6cfb"} Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.520164 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-zzcnd"] Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.538067 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-zzcnd"] Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.593236 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-45wdk"] Dec 11 10:54:55 crc kubenswrapper[5016]: I1211 10:54:55.600757 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-45wdk"] Dec 11 10:54:57 crc kubenswrapper[5016]: I1211 10:54:57.489718 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ffd5160-7545-4860-b9d3-c61eea1c2e72" path="/var/lib/kubelet/pods/2ffd5160-7545-4860-b9d3-c61eea1c2e72/volumes" Dec 11 10:54:57 crc kubenswrapper[5016]: I1211 10:54:57.493499 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85164e72-7f87-4203-9406-402e741c17d9" path="/var/lib/kubelet/pods/85164e72-7f87-4203-9406-402e741c17d9/volumes" Dec 11 10:54:59 crc kubenswrapper[5016]: I1211 10:54:59.519291 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"de7b514e-0bc7-4260-9bc4-9c0f1b13562b","Type":"ContainerStarted","Data":"c50d735f5501e712daee1c79b428046e4e737127234a1261a8002f4b84b77162"} Dec 11 10:54:59 crc kubenswrapper[5016]: I1211 10:54:59.522846 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-g76kk" event={"ID":"cf0694a8-c7ff-429f-a52f-5885a8dcb3ac","Type":"ContainerStarted","Data":"51a13aee2e6607a75b851a1c833362cfc1af1c7538be302b69be6a30d936c4c8"} Dec 11 10:54:59 crc kubenswrapper[5016]: I1211 10:54:59.522972 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-g76kk" Dec 11 10:54:59 crc kubenswrapper[5016]: I1211 10:54:59.525540 5016 generic.go:334] "Generic (PLEG): container finished" podID="4f718adb-56cf-4983-bd6a-e750e06edad7" containerID="d93ffbd4554b91181e0b441f3c52e7b8ad14a177f313090557d5415d4d2fa50b" exitCode=0 Dec 11 10:54:59 crc kubenswrapper[5016]: I1211 10:54:59.525640 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vbtwd" event={"ID":"4f718adb-56cf-4983-bd6a-e750e06edad7","Type":"ContainerDied","Data":"d93ffbd4554b91181e0b441f3c52e7b8ad14a177f313090557d5415d4d2fa50b"} Dec 11 10:54:59 crc kubenswrapper[5016]: I1211 10:54:59.528909 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd","Type":"ContainerStarted","Data":"e5441204e092f4e7d294a4836f4639b525c95f16eb77fad96c4f205be8614c57"} Dec 11 10:54:59 crc kubenswrapper[5016]: I1211 10:54:59.531361 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"65ac3f0e-4016-4586-b742-2c52252ed51b","Type":"ContainerStarted","Data":"13579a958c7061d6184641a4a39ff2ccb81025b02a95ae9d89dc9c74579234e3"} Dec 11 10:54:59 crc kubenswrapper[5016]: I1211 10:54:59.545465 5016 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-g76kk" podStartSLOduration=35.270964699 podStartE2EDuration="39.545443067s" podCreationTimestamp="2025-12-11 10:54:20 +0000 UTC" firstStartedPulling="2025-12-11 10:54:53.919069323 +0000 UTC m=+1210.737628902" lastFinishedPulling="2025-12-11 10:54:58.193547691 +0000 UTC m=+1215.012107270" observedRunningTime="2025-12-11 10:54:59.543346285 +0000 UTC m=+1216.361905874" watchObservedRunningTime="2025-12-11 10:54:59.545443067 +0000 UTC m=+1216.364002656" Dec 11 10:55:00 crc kubenswrapper[5016]: I1211 10:55:00.544907 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e46a21b8-75eb-49ac-8d08-0acaaa8fac37","Type":"ContainerStarted","Data":"ccccf842dadabad37bad8683166c7169076d2baa22d9ea9bc6e44216e5739d4e"} Dec 11 10:55:00 crc kubenswrapper[5016]: I1211 10:55:00.547959 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vbtwd" event={"ID":"4f718adb-56cf-4983-bd6a-e750e06edad7","Type":"ContainerStarted","Data":"9fea1233db2a379edb7142e6bb4ce4d609c1dd4e4d0da4d7418443038b8997a8"} Dec 11 10:55:00 crc kubenswrapper[5016]: I1211 10:55:00.550251 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f0588b90-e0f3-49e1-9ff9-76e8aac23b93","Type":"ContainerStarted","Data":"c9d7ac8601abdddce2f1028dd6cff0c649bf6d32316a1a230a5c23eee7f08fff"} Dec 11 10:55:00 crc kubenswrapper[5016]: I1211 10:55:00.550601 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Dec 11 10:55:00 crc kubenswrapper[5016]: I1211 10:55:00.596089 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=3.320786019 podStartE2EDuration="46.596066466s" podCreationTimestamp="2025-12-11 10:54:14 +0000 UTC" firstStartedPulling="2025-12-11 10:54:15.769409447 +0000 UTC m=+1172.587969026" lastFinishedPulling="2025-12-11 10:54:59.044689894 +0000 UTC m=+1215.863249473" observedRunningTime="2025-12-11 10:55:00.589158437 +0000 UTC m=+1217.407718026" watchObservedRunningTime="2025-12-11 10:55:00.596066466 +0000 UTC m=+1217.414626055" Dec 11 10:55:01 crc kubenswrapper[5016]: I1211 10:55:01.567084 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vbtwd" event={"ID":"4f718adb-56cf-4983-bd6a-e750e06edad7","Type":"ContainerStarted","Data":"2b3801290eaf5a43420f97926001807e13edb2c9bf6b39b5dc086227fbf549f5"} Dec 11 10:55:01 crc kubenswrapper[5016]: I1211 10:55:01.567748 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:55:01 crc kubenswrapper[5016]: I1211 10:55:01.567785 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:55:01 crc kubenswrapper[5016]: I1211 10:55:01.570062 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"070a458f-4698-4701-903e-a64d4dc7d95a","Type":"ContainerStarted","Data":"53ac27903523f9bbe2bac6028e86dfda9477868b2a3454901c341f8555e89b74"} Dec 11 10:55:01 crc kubenswrapper[5016]: I1211 10:55:01.570261 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 11 10:55:01 crc kubenswrapper[5016]: I1211 10:55:01.595789 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-vbtwd" 
podStartSLOduration=38.205387102 podStartE2EDuration="41.595773747s" podCreationTimestamp="2025-12-11 10:54:20 +0000 UTC" firstStartedPulling="2025-12-11 10:54:54.779516575 +0000 UTC m=+1211.598076154" lastFinishedPulling="2025-12-11 10:54:58.16990322 +0000 UTC m=+1214.988462799" observedRunningTime="2025-12-11 10:55:01.592799144 +0000 UTC m=+1218.411358743" watchObservedRunningTime="2025-12-11 10:55:01.595773747 +0000 UTC m=+1218.414333326" Dec 11 10:55:01 crc kubenswrapper[5016]: I1211 10:55:01.617296 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=9.467737646 podStartE2EDuration="45.617279024s" podCreationTimestamp="2025-12-11 10:54:16 +0000 UTC" firstStartedPulling="2025-12-11 10:54:24.308470949 +0000 UTC m=+1181.127030528" lastFinishedPulling="2025-12-11 10:55:00.458012327 +0000 UTC m=+1217.276571906" observedRunningTime="2025-12-11 10:55:01.610416746 +0000 UTC m=+1218.428976325" watchObservedRunningTime="2025-12-11 10:55:01.617279024 +0000 UTC m=+1218.435838603" Dec 11 10:55:02 crc kubenswrapper[5016]: I1211 10:55:02.578868 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"65ac3f0e-4016-4586-b742-2c52252ed51b","Type":"ContainerStarted","Data":"3cf32c43773885091a8f544ae7367a5e02fb5dd0bf94b6fb4990fa2967413c54"} Dec 11 10:55:02 crc kubenswrapper[5016]: I1211 10:55:02.581492 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"de7b514e-0bc7-4260-9bc4-9c0f1b13562b","Type":"ContainerStarted","Data":"b1df932e2da8d8af6cb213abfe7cf81d5be0884dafa71ff849019589b88a122c"} Dec 11 10:55:02 crc kubenswrapper[5016]: I1211 10:55:02.602186 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=35.533943696 podStartE2EDuration="43.602161301s" podCreationTimestamp="2025-12-11 10:54:19 +0000 UTC" firstStartedPulling="2025-12-11 10:54:54.199412354 +0000 UTC m=+1211.017971933" lastFinishedPulling="2025-12-11 10:55:02.267629959 +0000 UTC m=+1219.086189538" observedRunningTime="2025-12-11 10:55:02.598801328 +0000 UTC m=+1219.417360927" watchObservedRunningTime="2025-12-11 10:55:02.602161301 +0000 UTC m=+1219.420720890" Dec 11 10:55:02 crc kubenswrapper[5016]: I1211 10:55:02.626315 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=32.544178807 podStartE2EDuration="40.626298383s" podCreationTimestamp="2025-12-11 10:54:22 +0000 UTC" firstStartedPulling="2025-12-11 10:54:54.199390694 +0000 UTC m=+1211.017950273" lastFinishedPulling="2025-12-11 10:55:02.28151027 +0000 UTC m=+1219.100069849" observedRunningTime="2025-12-11 10:55:02.620755417 +0000 UTC m=+1219.439315016" watchObservedRunningTime="2025-12-11 10:55:02.626298383 +0000 UTC m=+1219.444857962" Dec 11 10:55:02 crc kubenswrapper[5016]: I1211 10:55:02.922923 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Dec 11 10:55:02 crc kubenswrapper[5016]: I1211 10:55:02.966593 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.056280 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.092469 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/ovsdbserver-nb-0" Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.590963 5016 generic.go:334] "Generic (PLEG): container finished" podID="ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd" containerID="e5441204e092f4e7d294a4836f4639b525c95f16eb77fad96c4f205be8614c57" exitCode=0 Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.591023 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd","Type":"ContainerDied","Data":"e5441204e092f4e7d294a4836f4639b525c95f16eb77fad96c4f205be8614c57"} Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.591407 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.591452 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.637381 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.637968 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.845873 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-q2mbw"] Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.906645 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-zbgtt"] Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.908838 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.915393 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.932545 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-rg5mz"] Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.934142 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.939408 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.952587 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-zbgtt"] Dec 11 10:55:03 crc kubenswrapper[5016]: I1211 10:55:03.975873 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-rg5mz"] Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.022919 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-zbgtt\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.023008 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7h64\" (UniqueName: \"kubernetes.io/projected/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-kube-api-access-f7h64\") pod \"dnsmasq-dns-5bf47b49b7-zbgtt\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.023079 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-config\") pod \"dnsmasq-dns-5bf47b49b7-zbgtt\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.023151 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvcd4\" (UniqueName: \"kubernetes.io/projected/d49703c4-2744-4669-baae-fc1ee5932f5d-kube-api-access-qvcd4\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.023185 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d49703c4-2744-4669-baae-fc1ee5932f5d-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.023247 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-zbgtt\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.023268 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d49703c4-2744-4669-baae-fc1ee5932f5d-ovn-rundir\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.023324 5016 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d49703c4-2744-4669-baae-fc1ee5932f5d-combined-ca-bundle\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.023404 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d49703c4-2744-4669-baae-fc1ee5932f5d-ovs-rundir\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.023433 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d49703c4-2744-4669-baae-fc1ee5932f5d-config\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.065393 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-m87sq"] Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.093221 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-zvj76"] Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.094695 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.100605 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.125624 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d49703c4-2744-4669-baae-fc1ee5932f5d-combined-ca-bundle\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.125684 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d49703c4-2744-4669-baae-fc1ee5932f5d-ovs-rundir\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.125707 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d49703c4-2744-4669-baae-fc1ee5932f5d-config\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.126550 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-zbgtt\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.126612 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7h64\" (UniqueName: 
\"kubernetes.io/projected/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-kube-api-access-f7h64\") pod \"dnsmasq-dns-5bf47b49b7-zbgtt\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.126655 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-config\") pod \"dnsmasq-dns-5bf47b49b7-zbgtt\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.126675 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvcd4\" (UniqueName: \"kubernetes.io/projected/d49703c4-2744-4669-baae-fc1ee5932f5d-kube-api-access-qvcd4\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.126726 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d49703c4-2744-4669-baae-fc1ee5932f5d-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.126783 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-zbgtt\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.126804 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d49703c4-2744-4669-baae-fc1ee5932f5d-ovn-rundir\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.127435 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d49703c4-2744-4669-baae-fc1ee5932f5d-ovn-rundir\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.133249 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-zvj76"] Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.135034 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d49703c4-2744-4669-baae-fc1ee5932f5d-ovs-rundir\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.135288 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-config\") pod \"dnsmasq-dns-5bf47b49b7-zbgtt\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.135326 5016 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-zbgtt\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.135649 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d49703c4-2744-4669-baae-fc1ee5932f5d-config\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.136049 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-zbgtt\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.148795 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.150679 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.160507 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d49703c4-2744-4669-baae-fc1ee5932f5d-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.161275 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.162106 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-dqb2w" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.172067 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.180617 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d49703c4-2744-4669-baae-fc1ee5932f5d-combined-ca-bundle\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.181347 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvcd4\" (UniqueName: \"kubernetes.io/projected/d49703c4-2744-4669-baae-fc1ee5932f5d-kube-api-access-qvcd4\") pod \"ovn-controller-metrics-rg5mz\" (UID: \"d49703c4-2744-4669-baae-fc1ee5932f5d\") " pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.174713 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.192120 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7h64\" (UniqueName: \"kubernetes.io/projected/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-kube-api-access-f7h64\") pod \"dnsmasq-dns-5bf47b49b7-zbgtt\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " 
pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.203016 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.250086 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.255047 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.255307 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/817d89c2-920a-49a9-b87d-308f48847b2f-config\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.255396 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-dns-svc\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.255443 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/817d89c2-920a-49a9-b87d-308f48847b2f-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.255513 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.255539 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/817d89c2-920a-49a9-b87d-308f48847b2f-scripts\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.255690 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2t2kq\" (UniqueName: \"kubernetes.io/projected/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-kube-api-access-2t2kq\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.255812 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnhc8\" (UniqueName: \"kubernetes.io/projected/817d89c2-920a-49a9-b87d-308f48847b2f-kube-api-access-qnhc8\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: 
I1211 10:55:04.255878 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-config\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.255917 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817d89c2-920a-49a9-b87d-308f48847b2f-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.255919 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-rg5mz" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.263274 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/817d89c2-920a-49a9-b87d-308f48847b2f-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.263467 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/817d89c2-920a-49a9-b87d-308f48847b2f-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.312473 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.396748 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.396802 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/817d89c2-920a-49a9-b87d-308f48847b2f-scripts\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.396864 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2t2kq\" (UniqueName: \"kubernetes.io/projected/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-kube-api-access-2t2kq\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.396912 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnhc8\" (UniqueName: \"kubernetes.io/projected/817d89c2-920a-49a9-b87d-308f48847b2f-kube-api-access-qnhc8\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.396960 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-config\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.396985 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817d89c2-920a-49a9-b87d-308f48847b2f-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.397006 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/817d89c2-920a-49a9-b87d-308f48847b2f-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.397046 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/817d89c2-920a-49a9-b87d-308f48847b2f-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.397093 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.397155 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/817d89c2-920a-49a9-b87d-308f48847b2f-config\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.397189 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-dns-svc\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.397225 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/817d89c2-920a-49a9-b87d-308f48847b2f-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.397849 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.401924 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/817d89c2-920a-49a9-b87d-308f48847b2f-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.402565 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/817d89c2-920a-49a9-b87d-308f48847b2f-scripts\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.405528 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-config\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.406059 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/817d89c2-920a-49a9-b87d-308f48847b2f-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.406372 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/817d89c2-920a-49a9-b87d-308f48847b2f-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.406478 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.407416 5016 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-dns-svc\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.408257 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/817d89c2-920a-49a9-b87d-308f48847b2f-config\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.414987 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/817d89c2-920a-49a9-b87d-308f48847b2f-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.431181 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2t2kq\" (UniqueName: \"kubernetes.io/projected/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-kube-api-access-2t2kq\") pod \"dnsmasq-dns-8554648995-zvj76\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") " pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.431523 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnhc8\" (UniqueName: \"kubernetes.io/projected/817d89c2-920a-49a9-b87d-308f48847b2f-kube-api-access-qnhc8\") pod \"ovn-northd-0\" (UID: \"817d89c2-920a-49a9-b87d-308f48847b2f\") " pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.443616 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.498906 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/feb4fb10-1a2c-4e86-bc14-c75750265150-dns-svc\") pod \"feb4fb10-1a2c-4e86-bc14-c75750265150\" (UID: \"feb4fb10-1a2c-4e86-bc14-c75750265150\") " Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.499466 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/feb4fb10-1a2c-4e86-bc14-c75750265150-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "feb4fb10-1a2c-4e86-bc14-c75750265150" (UID: "feb4fb10-1a2c-4e86-bc14-c75750265150"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.499718 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/feb4fb10-1a2c-4e86-bc14-c75750265150-config\") pod \"feb4fb10-1a2c-4e86-bc14-c75750265150\" (UID: \"feb4fb10-1a2c-4e86-bc14-c75750265150\") " Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.499885 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vssv4\" (UniqueName: \"kubernetes.io/projected/feb4fb10-1a2c-4e86-bc14-c75750265150-kube-api-access-vssv4\") pod \"feb4fb10-1a2c-4e86-bc14-c75750265150\" (UID: \"feb4fb10-1a2c-4e86-bc14-c75750265150\") " Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.500291 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/feb4fb10-1a2c-4e86-bc14-c75750265150-config" (OuterVolumeSpecName: "config") pod "feb4fb10-1a2c-4e86-bc14-c75750265150" (UID: "feb4fb10-1a2c-4e86-bc14-c75750265150"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.504016 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/feb4fb10-1a2c-4e86-bc14-c75750265150-kube-api-access-vssv4" (OuterVolumeSpecName: "kube-api-access-vssv4") pod "feb4fb10-1a2c-4e86-bc14-c75750265150" (UID: "feb4fb10-1a2c-4e86-bc14-c75750265150"). InnerVolumeSpecName "kube-api-access-vssv4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.504404 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/feb4fb10-1a2c-4e86-bc14-c75750265150-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.504423 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/feb4fb10-1a2c-4e86-bc14-c75750265150-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.504432 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vssv4\" (UniqueName: \"kubernetes.io/projected/feb4fb10-1a2c-4e86-bc14-c75750265150-kube-api-access-vssv4\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.528364 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.553697 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.613104 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd","Type":"ContainerStarted","Data":"22b187660886426dc3223a2ee13d3e0444ed05487a9b073a715fdc64ebe40078"} Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.615684 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" event={"ID":"feb4fb10-1a2c-4e86-bc14-c75750265150","Type":"ContainerDied","Data":"8650effc27e07562d7dcdf523b773e21526599fe01ee414b10fcf64bbbf595bb"} Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.615714 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-q2mbw" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.622316 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.622451 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-m87sq" event={"ID":"124b25d9-0d06-4689-8612-4f8b8ca3b0e6","Type":"ContainerDied","Data":"c6852511e6465fb819fb5d2ad1bc2827b4d74f2f7130bea8471a5f9512db5a6b"} Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.650686 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-rg5mz"] Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.658494 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=8.533114397 podStartE2EDuration="53.658472138s" podCreationTimestamp="2025-12-11 10:54:11 +0000 UTC" firstStartedPulling="2025-12-11 10:54:13.960104554 +0000 UTC m=+1170.778664143" lastFinishedPulling="2025-12-11 10:54:59.085462305 +0000 UTC m=+1215.904021884" observedRunningTime="2025-12-11 10:55:04.640017555 +0000 UTC m=+1221.458577144" watchObservedRunningTime="2025-12-11 10:55:04.658472138 +0000 UTC m=+1221.477031717" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.706858 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9qhj\" (UniqueName: \"kubernetes.io/projected/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-kube-api-access-q9qhj\") pod \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\" (UID: \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\") " Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.707578 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-dns-svc\") pod \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\" (UID: \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\") " Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.707679 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-config\") pod \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\" (UID: \"124b25d9-0d06-4689-8612-4f8b8ca3b0e6\") " Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.708261 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-config" (OuterVolumeSpecName: "config") pod "124b25d9-0d06-4689-8612-4f8b8ca3b0e6" (UID: "124b25d9-0d06-4689-8612-4f8b8ca3b0e6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.708362 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "124b25d9-0d06-4689-8612-4f8b8ca3b0e6" (UID: "124b25d9-0d06-4689-8612-4f8b8ca3b0e6"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.710875 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.711448 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.713868 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-kube-api-access-q9qhj" (OuterVolumeSpecName: "kube-api-access-q9qhj") pod "124b25d9-0d06-4689-8612-4f8b8ca3b0e6" (UID: "124b25d9-0d06-4689-8612-4f8b8ca3b0e6"). InnerVolumeSpecName "kube-api-access-q9qhj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.757725 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.813045 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9qhj\" (UniqueName: \"kubernetes.io/projected/124b25d9-0d06-4689-8612-4f8b8ca3b0e6-kube-api-access-q9qhj\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.861181 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-q2mbw"] Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.867719 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-q2mbw"] Dec 11 10:55:04 crc kubenswrapper[5016]: I1211 10:55:04.925074 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-zvj76"] Dec 11 10:55:05 crc kubenswrapper[5016]: I1211 10:55:05.000115 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-zbgtt"] Dec 11 10:55:05 crc kubenswrapper[5016]: I1211 10:55:05.045483 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-m87sq"] Dec 11 10:55:05 crc kubenswrapper[5016]: I1211 10:55:05.062609 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-m87sq"] Dec 11 10:55:05 crc kubenswrapper[5016]: E1211 10:55:05.089101 5016 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.53:48022->38.102.83.53:34147: write tcp 38.102.83.53:48022->38.102.83.53:34147: write: connection reset by peer Dec 11 10:55:05 crc kubenswrapper[5016]: I1211 10:55:05.246432 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 11 10:55:05 crc kubenswrapper[5016]: W1211 10:55:05.247369 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod817d89c2_920a_49a9_b87d_308f48847b2f.slice/crio-95bf293b51553eeb9b55cf92e85b054e73dd1d8ce8bd2e7aaeb165bb1fc52dd8 WatchSource:0}: Error finding container 95bf293b51553eeb9b55cf92e85b054e73dd1d8ce8bd2e7aaeb165bb1fc52dd8: Status 404 returned error can't find the container with id 95bf293b51553eeb9b55cf92e85b054e73dd1d8ce8bd2e7aaeb165bb1fc52dd8 Dec 11 10:55:05 crc kubenswrapper[5016]: I1211 10:55:05.484359 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="124b25d9-0d06-4689-8612-4f8b8ca3b0e6" path="/var/lib/kubelet/pods/124b25d9-0d06-4689-8612-4f8b8ca3b0e6/volumes" Dec 11 10:55:05 crc kubenswrapper[5016]: I1211 10:55:05.484825 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="feb4fb10-1a2c-4e86-bc14-c75750265150" path="/var/lib/kubelet/pods/feb4fb10-1a2c-4e86-bc14-c75750265150/volumes" Dec 11 10:55:05 crc kubenswrapper[5016]: I1211 10:55:05.629779 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"817d89c2-920a-49a9-b87d-308f48847b2f","Type":"ContainerStarted","Data":"95bf293b51553eeb9b55cf92e85b054e73dd1d8ce8bd2e7aaeb165bb1fc52dd8"} Dec 11 10:55:05 crc kubenswrapper[5016]: I1211 10:55:05.631382 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-rg5mz" event={"ID":"d49703c4-2744-4669-baae-fc1ee5932f5d","Type":"ContainerStarted","Data":"885003f5d556380fdb406f0597fdb9de37f4437a9e5a36d9cd97c5533b0e1162"} Dec 11 10:55:05 crc kubenswrapper[5016]: I1211 10:55:05.631408 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-rg5mz" event={"ID":"d49703c4-2744-4669-baae-fc1ee5932f5d","Type":"ContainerStarted","Data":"24f2c94477c76fec09e1dc81efd63f033d7923a416b8d1c421517aa3d702200b"} Dec 11 10:55:05 crc kubenswrapper[5016]: I1211 10:55:05.632578 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-zvj76" event={"ID":"2f9cf295-ec3d-484a-b0ba-9577386e8ef1","Type":"ContainerStarted","Data":"b29dd600993d56aca9f36d9392eb7339355ace5ecae289d0136ef31b5fdf24d2"} Dec 11 10:55:05 crc kubenswrapper[5016]: I1211 10:55:05.633634 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" event={"ID":"865ae06f-219d-44bc-a90b-ab44fe5fb8bc","Type":"ContainerStarted","Data":"d51953f0fc71d68f46b51be7d36fcd8b4c6b163b25d9eb61def90b1ae71a5e47"} Dec 11 10:55:05 crc kubenswrapper[5016]: I1211 10:55:05.650995 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-rg5mz" podStartSLOduration=2.650975771 podStartE2EDuration="2.650975771s" podCreationTimestamp="2025-12-11 10:55:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:55:05.64765195 +0000 UTC m=+1222.466211539" watchObservedRunningTime="2025-12-11 10:55:05.650975771 +0000 UTC m=+1222.469535350" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.667822 5016 generic.go:334] "Generic (PLEG): container finished" podID="2f9cf295-ec3d-484a-b0ba-9577386e8ef1" containerID="e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1" exitCode=0 Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.668013 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-zvj76" event={"ID":"2f9cf295-ec3d-484a-b0ba-9577386e8ef1","Type":"ContainerDied","Data":"e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1"} Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.689206 5016 generic.go:334] "Generic (PLEG): container finished" podID="865ae06f-219d-44bc-a90b-ab44fe5fb8bc" containerID="5a5a14b0faa8c49cbb7599b7183cc76356e64bdd0852ef36351b2cef9ebaf528" exitCode=0 Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.690738 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" 
event={"ID":"865ae06f-219d-44bc-a90b-ab44fe5fb8bc","Type":"ContainerDied","Data":"5a5a14b0faa8c49cbb7599b7183cc76356e64bdd0852ef36351b2cef9ebaf528"} Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.759965 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-zbgtt"] Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.776071 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.810998 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-6h9sw"] Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.812441 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.819902 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-6h9sw"] Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.866005 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.866082 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-config\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.866120 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltcsz\" (UniqueName: \"kubernetes.io/projected/e22ba3c0-863e-417e-bbfe-6ca4426f9936-kube-api-access-ltcsz\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.866153 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.866181 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.969586 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-config\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.969657 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltcsz\" 
(UniqueName: \"kubernetes.io/projected/e22ba3c0-863e-417e-bbfe-6ca4426f9936-kube-api-access-ltcsz\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.969690 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.969712 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.969814 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.970855 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.971533 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-config\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.971847 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:06 crc kubenswrapper[5016]: I1211 10:55:06.972362 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.000849 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltcsz\" (UniqueName: \"kubernetes.io/projected/e22ba3c0-863e-417e-bbfe-6ca4426f9936-kube-api-access-ltcsz\") pod \"dnsmasq-dns-b8fbc5445-6h9sw\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") " pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.134611 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.606166 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-6h9sw"] Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.704522 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" event={"ID":"e22ba3c0-863e-417e-bbfe-6ca4426f9936","Type":"ContainerStarted","Data":"beef41358df5d09134c7f99ae2cfa18e8f8191c8734d5f5f83935a24d9a70c18"} Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.708060 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"817d89c2-920a-49a9-b87d-308f48847b2f","Type":"ContainerStarted","Data":"6c2ec7d810d99582738102fc7f134ae40829a3bda11b129de6ac737ede049202"} Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.708110 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"817d89c2-920a-49a9-b87d-308f48847b2f","Type":"ContainerStarted","Data":"9a47609c3ad635ad0c2278a636efedfe383b03bc393dfb9de59b401cff24df66"} Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.709413 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.723350 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"be590587-03d9-4391-98b3-bacb7432ec51","Type":"ContainerStarted","Data":"8e002570cb42f8f46703eb75e8f3a14d7862cb909adf0fddc607d56cf7a7fece"} Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.728210 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-zvj76" event={"ID":"2f9cf295-ec3d-484a-b0ba-9577386e8ef1","Type":"ContainerStarted","Data":"4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236"} Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.729210 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-zvj76" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.735158 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=1.9886033300000001 podStartE2EDuration="3.735116972s" podCreationTimestamp="2025-12-11 10:55:04 +0000 UTC" firstStartedPulling="2025-12-11 10:55:05.249590499 +0000 UTC m=+1222.068150078" lastFinishedPulling="2025-12-11 10:55:06.996104141 +0000 UTC m=+1223.814663720" observedRunningTime="2025-12-11 10:55:07.729627717 +0000 UTC m=+1224.548187336" watchObservedRunningTime="2025-12-11 10:55:07.735116972 +0000 UTC m=+1224.553676571" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.743380 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" event={"ID":"865ae06f-219d-44bc-a90b-ab44fe5fb8bc","Type":"ContainerStarted","Data":"cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963"} Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.743585 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" podUID="865ae06f-219d-44bc-a90b-ab44fe5fb8bc" containerName="dnsmasq-dns" containerID="cri-o://cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963" gracePeriod=10 Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.743846 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.767173 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-zvj76" podStartSLOduration=2.957660387 podStartE2EDuration="3.767150008s" podCreationTimestamp="2025-12-11 10:55:04 +0000 UTC" firstStartedPulling="2025-12-11 10:55:04.975331096 +0000 UTC m=+1221.793890675" lastFinishedPulling="2025-12-11 10:55:05.784820717 +0000 UTC m=+1222.603380296" observedRunningTime="2025-12-11 10:55:07.75990601 +0000 UTC m=+1224.578465589" watchObservedRunningTime="2025-12-11 10:55:07.767150008 +0000 UTC m=+1224.585709607" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.815298 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" podStartSLOduration=4.051282416 podStartE2EDuration="4.81527892s" podCreationTimestamp="2025-12-11 10:55:03 +0000 UTC" firstStartedPulling="2025-12-11 10:55:05.020827933 +0000 UTC m=+1221.839387512" lastFinishedPulling="2025-12-11 10:55:05.784824437 +0000 UTC m=+1222.603384016" observedRunningTime="2025-12-11 10:55:07.814267484 +0000 UTC m=+1224.632827063" watchObservedRunningTime="2025-12-11 10:55:07.81527892 +0000 UTC m=+1224.633838489" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.878063 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.884057 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.886653 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.886694 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.886819 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-chhmj" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.900983 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.903770 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.987077 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dv2qt\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-kube-api-access-dv2qt\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.988015 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.988086 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a24f6c06-a757-4b4b-9361-e87f07af2ca8-cache\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 
10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.988314 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a24f6c06-a757-4b4b-9361-e87f07af2ca8-lock\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:07 crc kubenswrapper[5016]: I1211 10:55:07.988733 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.094688 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.094773 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dv2qt\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-kube-api-access-dv2qt\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.094839 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.094872 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a24f6c06-a757-4b4b-9361-e87f07af2ca8-cache\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.094926 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a24f6c06-a757-4b4b-9361-e87f07af2ca8-lock\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:08 crc kubenswrapper[5016]: E1211 10:55:08.095258 5016 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 11 10:55:08 crc kubenswrapper[5016]: E1211 10:55:08.095305 5016 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 11 10:55:08 crc kubenswrapper[5016]: E1211 10:55:08.095392 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift podName:a24f6c06-a757-4b4b-9361-e87f07af2ca8 nodeName:}" failed. No retries permitted until 2025-12-11 10:55:08.595359555 +0000 UTC m=+1225.413919134 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift") pod "swift-storage-0" (UID: "a24f6c06-a757-4b4b-9361-e87f07af2ca8") : configmap "swift-ring-files" not found Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.095533 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a24f6c06-a757-4b4b-9361-e87f07af2ca8-lock\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.095840 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/swift-storage-0" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.096025 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a24f6c06-a757-4b4b-9361-e87f07af2ca8-cache\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.134774 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dv2qt\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-kube-api-access-dv2qt\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.160174 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.343289 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.481555 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-cfbpr"] Dec 11 10:55:08 crc kubenswrapper[5016]: E1211 10:55:08.481980 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="865ae06f-219d-44bc-a90b-ab44fe5fb8bc" containerName="init" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.482007 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="865ae06f-219d-44bc-a90b-ab44fe5fb8bc" containerName="init" Dec 11 10:55:08 crc kubenswrapper[5016]: E1211 10:55:08.482038 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="865ae06f-219d-44bc-a90b-ab44fe5fb8bc" containerName="dnsmasq-dns" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.482044 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="865ae06f-219d-44bc-a90b-ab44fe5fb8bc" containerName="dnsmasq-dns" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.482206 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="865ae06f-219d-44bc-a90b-ab44fe5fb8bc" containerName="dnsmasq-dns" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.482819 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.490921 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.491112 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.491276 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.501827 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-dns-svc\") pod \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.502212 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-config\") pod \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.502299 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7h64\" (UniqueName: \"kubernetes.io/projected/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-kube-api-access-f7h64\") pod \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.502423 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-ovsdbserver-nb\") pod \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\" (UID: \"865ae06f-219d-44bc-a90b-ab44fe5fb8bc\") " Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.505570 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-cfbpr"] Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.565384 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-kube-api-access-f7h64" (OuterVolumeSpecName: "kube-api-access-f7h64") pod "865ae06f-219d-44bc-a90b-ab44fe5fb8bc" (UID: "865ae06f-219d-44bc-a90b-ab44fe5fb8bc"). InnerVolumeSpecName "kube-api-access-f7h64". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.600670 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "865ae06f-219d-44bc-a90b-ab44fe5fb8bc" (UID: "865ae06f-219d-44bc-a90b-ab44fe5fb8bc"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.604449 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.604695 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-ring-data-devices\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: E1211 10:55:08.604768 5016 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 11 10:55:08 crc kubenswrapper[5016]: E1211 10:55:08.604813 5016 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.604786 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxprh\" (UniqueName: \"kubernetes.io/projected/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-kube-api-access-bxprh\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: E1211 10:55:08.604922 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift podName:a24f6c06-a757-4b4b-9361-e87f07af2ca8 nodeName:}" failed. No retries permitted until 2025-12-11 10:55:09.604879112 +0000 UTC m=+1226.423438691 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift") pod "swift-storage-0" (UID: "a24f6c06-a757-4b4b-9361-e87f07af2ca8") : configmap "swift-ring-files" not found Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.605169 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-etc-swift\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.605274 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-swiftconf\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.605305 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-scripts\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.605352 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-combined-ca-bundle\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.605491 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-dispersionconf\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.605624 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7h64\" (UniqueName: \"kubernetes.io/projected/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-kube-api-access-f7h64\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.605644 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.608155 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-config" (OuterVolumeSpecName: "config") pod "865ae06f-219d-44bc-a90b-ab44fe5fb8bc" (UID: "865ae06f-219d-44bc-a90b-ab44fe5fb8bc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.613353 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "865ae06f-219d-44bc-a90b-ab44fe5fb8bc" (UID: "865ae06f-219d-44bc-a90b-ab44fe5fb8bc"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.706638 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-swiftconf\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.706694 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-scripts\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.706715 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-combined-ca-bundle\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.706748 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-dispersionconf\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.706829 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-ring-data-devices\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.706853 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxprh\" (UniqueName: \"kubernetes.io/projected/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-kube-api-access-bxprh\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.706894 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-etc-swift\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.706958 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.706973 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/865ae06f-219d-44bc-a90b-ab44fe5fb8bc-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.707586 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-etc-swift\") pod \"swift-ring-rebalance-cfbpr\" (UID: 
\"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.708326 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-ring-data-devices\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.708860 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-scripts\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.771224 5016 generic.go:334] "Generic (PLEG): container finished" podID="865ae06f-219d-44bc-a90b-ab44fe5fb8bc" containerID="cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963" exitCode=0 Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.771336 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" event={"ID":"865ae06f-219d-44bc-a90b-ab44fe5fb8bc","Type":"ContainerDied","Data":"cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963"} Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.771368 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" event={"ID":"865ae06f-219d-44bc-a90b-ab44fe5fb8bc","Type":"ContainerDied","Data":"d51953f0fc71d68f46b51be7d36fcd8b4c6b163b25d9eb61def90b1ae71a5e47"} Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.771386 5016 scope.go:117] "RemoveContainer" containerID="cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.771544 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-zbgtt" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.775097 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxprh\" (UniqueName: \"kubernetes.io/projected/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-kube-api-access-bxprh\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.775209 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-dispersionconf\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.775390 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-swiftconf\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.788323 5016 generic.go:334] "Generic (PLEG): container finished" podID="e22ba3c0-863e-417e-bbfe-6ca4426f9936" containerID="fb840a84a6c291b99bbbaeca9aa0d24e2a9096473bae8d54b271b3ca743c72cc" exitCode=0 Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.790382 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" event={"ID":"e22ba3c0-863e-417e-bbfe-6ca4426f9936","Type":"ContainerDied","Data":"fb840a84a6c291b99bbbaeca9aa0d24e2a9096473bae8d54b271b3ca743c72cc"} Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.799914 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-combined-ca-bundle\") pod \"swift-ring-rebalance-cfbpr\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.804785 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.844079 5016 scope.go:117] "RemoveContainer" containerID="5a5a14b0faa8c49cbb7599b7183cc76356e64bdd0852ef36351b2cef9ebaf528" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.907286 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-zbgtt"] Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.907790 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-zbgtt"] Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.946495 5016 scope.go:117] "RemoveContainer" containerID="cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963" Dec 11 10:55:08 crc kubenswrapper[5016]: E1211 10:55:08.947144 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963\": container with ID starting with cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963 not found: ID does not exist" containerID="cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.947275 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963"} err="failed to get container status \"cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963\": rpc error: code = NotFound desc = could not find container \"cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963\": container with ID starting with cc0f4b401b75dba1a1983dba0fb916e6e210bac73c665b5cf877d4de35cc1963 not found: ID does not exist" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.947361 5016 scope.go:117] "RemoveContainer" containerID="5a5a14b0faa8c49cbb7599b7183cc76356e64bdd0852ef36351b2cef9ebaf528" Dec 11 10:55:08 crc kubenswrapper[5016]: E1211 10:55:08.950001 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a5a14b0faa8c49cbb7599b7183cc76356e64bdd0852ef36351b2cef9ebaf528\": container with ID starting with 5a5a14b0faa8c49cbb7599b7183cc76356e64bdd0852ef36351b2cef9ebaf528 not found: ID does not exist" containerID="5a5a14b0faa8c49cbb7599b7183cc76356e64bdd0852ef36351b2cef9ebaf528" Dec 11 10:55:08 crc kubenswrapper[5016]: I1211 10:55:08.950071 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a5a14b0faa8c49cbb7599b7183cc76356e64bdd0852ef36351b2cef9ebaf528"} err="failed to get container status \"5a5a14b0faa8c49cbb7599b7183cc76356e64bdd0852ef36351b2cef9ebaf528\": rpc error: code = NotFound desc = could not find container \"5a5a14b0faa8c49cbb7599b7183cc76356e64bdd0852ef36351b2cef9ebaf528\": container with ID starting with 5a5a14b0faa8c49cbb7599b7183cc76356e64bdd0852ef36351b2cef9ebaf528 not found: ID does not exist" Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.451866 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-cfbpr"] Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.485744 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="865ae06f-219d-44bc-a90b-ab44fe5fb8bc" path="/var/lib/kubelet/pods/865ae06f-219d-44bc-a90b-ab44fe5fb8bc/volumes" Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.630226 5016 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:09 crc kubenswrapper[5016]: E1211 10:55:09.630470 5016 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 11 10:55:09 crc kubenswrapper[5016]: E1211 10:55:09.630510 5016 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 11 10:55:09 crc kubenswrapper[5016]: E1211 10:55:09.630580 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift podName:a24f6c06-a757-4b4b-9361-e87f07af2ca8 nodeName:}" failed. No retries permitted until 2025-12-11 10:55:11.630562851 +0000 UTC m=+1228.449122430 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift") pod "swift-storage-0" (UID: "a24f6c06-a757-4b4b-9361-e87f07af2ca8") : configmap "swift-ring-files" not found Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.797387 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"313107c9-4bb0-49ad-a67b-7f2e4ae09753","Type":"ContainerStarted","Data":"b2f2efa6faa297b8f0bc3ba17f76e2c24b2691d9c6af06345d0d0383aaacc499"} Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.799214 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-cfbpr" event={"ID":"8d68a71e-cbcb-4ce9-bb01-3b48154074a4","Type":"ContainerStarted","Data":"f966a10a8f9aa0e7d91bf413e55e67ba3a483207d805e0ffee0b9db93e869a46"} Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.801307 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" event={"ID":"e22ba3c0-863e-417e-bbfe-6ca4426f9936","Type":"ContainerStarted","Data":"2386f5dcccc094129c96b672321197b1f7bad5c4bff95188cca67d1db6f06ae7"} Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.801436 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.852046 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" podStartSLOduration=3.852030877 podStartE2EDuration="3.852030877s" podCreationTimestamp="2025-12-11 10:55:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:55:09.845497806 +0000 UTC m=+1226.664057385" watchObservedRunningTime="2025-12-11 10:55:09.852030877 +0000 UTC m=+1226.670590456" Dec 11 10:55:11 crc kubenswrapper[5016]: I1211 10:55:11.670550 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:11 crc kubenswrapper[5016]: E1211 10:55:11.670827 5016 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 11 10:55:11 crc kubenswrapper[5016]: E1211 10:55:11.671241 5016 
Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.797387 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"313107c9-4bb0-49ad-a67b-7f2e4ae09753","Type":"ContainerStarted","Data":"b2f2efa6faa297b8f0bc3ba17f76e2c24b2691d9c6af06345d0d0383aaacc499"}
Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.799214 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-cfbpr" event={"ID":"8d68a71e-cbcb-4ce9-bb01-3b48154074a4","Type":"ContainerStarted","Data":"f966a10a8f9aa0e7d91bf413e55e67ba3a483207d805e0ffee0b9db93e869a46"}
Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.801307 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" event={"ID":"e22ba3c0-863e-417e-bbfe-6ca4426f9936","Type":"ContainerStarted","Data":"2386f5dcccc094129c96b672321197b1f7bad5c4bff95188cca67d1db6f06ae7"}
Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.801436 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw"
Dec 11 10:55:09 crc kubenswrapper[5016]: I1211 10:55:09.852046 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" podStartSLOduration=3.852030877 podStartE2EDuration="3.852030877s" podCreationTimestamp="2025-12-11 10:55:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:55:09.845497806 +0000 UTC m=+1226.664057385" watchObservedRunningTime="2025-12-11 10:55:09.852030877 +0000 UTC m=+1226.670590456"
Dec 11 10:55:11 crc kubenswrapper[5016]: I1211 10:55:11.670550 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0"
Dec 11 10:55:11 crc kubenswrapper[5016]: E1211 10:55:11.670827 5016 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Dec 11 10:55:11 crc kubenswrapper[5016]: E1211 10:55:11.671241 5016 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Dec 11 10:55:11 crc kubenswrapper[5016]: E1211 10:55:11.671330 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift podName:a24f6c06-a757-4b4b-9361-e87f07af2ca8 nodeName:}" failed. No retries permitted until 2025-12-11 10:55:15.671299615 +0000 UTC m=+1232.489859194 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift") pod "swift-storage-0" (UID: "a24f6c06-a757-4b4b-9361-e87f07af2ca8") : configmap "swift-ring-files" not found
Dec 11 10:55:11 crc kubenswrapper[5016]: I1211 10:55:11.852828 5016 generic.go:334] "Generic (PLEG): container finished" podID="be590587-03d9-4391-98b3-bacb7432ec51" containerID="8e002570cb42f8f46703eb75e8f3a14d7862cb909adf0fddc607d56cf7a7fece" exitCode=0
Dec 11 10:55:11 crc kubenswrapper[5016]: I1211 10:55:11.852902 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"be590587-03d9-4391-98b3-bacb7432ec51","Type":"ContainerDied","Data":"8e002570cb42f8f46703eb75e8f3a14d7862cb909adf0fddc607d56cf7a7fece"}
Dec 11 10:55:12 crc kubenswrapper[5016]: I1211 10:55:12.933260 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:55:12 crc kubenswrapper[5016]: I1211 10:55:12.933668 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:55:13 crc kubenswrapper[5016]: I1211 10:55:13.015457 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Dec 11 10:55:13 crc kubenswrapper[5016]: I1211 10:55:13.015519 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Dec 11 10:55:13 crc kubenswrapper[5016]: I1211 10:55:13.101514 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Dec 11 10:55:13 crc kubenswrapper[5016]: I1211 10:55:13.870071 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-cfbpr" event={"ID":"8d68a71e-cbcb-4ce9-bb01-3b48154074a4","Type":"ContainerStarted","Data":"58a8aee13fa31d487dcf96e04a18fe6d63bd3250ad5f18e19b9bacefac0b430e"}
Dec 11 10:55:13 crc kubenswrapper[5016]: I1211 10:55:13.873885 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"be590587-03d9-4391-98b3-bacb7432ec51","Type":"ContainerStarted","Data":"e77f5e168b60796d920b4d15d4d7aa35594bfc1fa2f5354ec08c24a4022941a0"}
Dec 11 10:55:13 crc kubenswrapper[5016]: I1211 10:55:13.898274 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-cfbpr" podStartSLOduration=1.934694997 podStartE2EDuration="5.898252082s" podCreationTimestamp="2025-12-11 10:55:08 +0000 UTC" firstStartedPulling="2025-12-11 10:55:09.457994934 +0000 UTC m=+1226.276554513" lastFinishedPulling="2025-12-11 10:55:13.421552029 +0000 UTC m=+1230.240111598" observedRunningTime="2025-12-11 10:55:13.886704857 +0000 UTC m=+1230.705264456" watchObservedRunningTime="2025-12-11 10:55:13.898252082 +0000 UTC m=+1230.716811661"
Dec 11 10:55:13 crc kubenswrapper[5016]: I1211 10:55:13.914412 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371975.940388 podStartE2EDuration="1m0.914388917s" podCreationTimestamp="2025-12-11 10:54:13 +0000 UTC" firstStartedPulling="2025-12-11 10:54:15.362640483 +0000 UTC m=+1172.181200062" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:55:13.910130643 +0000 UTC m=+1230.728690232" watchObservedRunningTime="2025-12-11 10:55:13.914388917 +0000 UTC m=+1230.732948506"
Dec 11 10:55:13 crc kubenswrapper[5016]: I1211 10:55:13.963851 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.416276 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-1dd0-account-create-update-26v9f"]
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.417361 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-1dd0-account-create-update-26v9f"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.419512 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.446026 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-zvj76"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.464726 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-nxzgl"]
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.472294 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-nxzgl"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.503063 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-nxzgl"]
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.522994 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-1dd0-account-create-update-26v9f"]
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.532226 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57l94\" (UniqueName: \"kubernetes.io/projected/c3710c5e-0042-4f48-861d-c793cf81e42f-kube-api-access-57l94\") pod \"keystone-db-create-nxzgl\" (UID: \"c3710c5e-0042-4f48-861d-c793cf81e42f\") " pod="openstack/keystone-db-create-nxzgl"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.532563 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dxn2\" (UniqueName: \"kubernetes.io/projected/4af08fef-0f95-4448-bd66-b84589609611-kube-api-access-4dxn2\") pod \"keystone-1dd0-account-create-update-26v9f\" (UID: \"4af08fef-0f95-4448-bd66-b84589609611\") " pod="openstack/keystone-1dd0-account-create-update-26v9f"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.532680 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4af08fef-0f95-4448-bd66-b84589609611-operator-scripts\") pod \"keystone-1dd0-account-create-update-26v9f\" (UID: \"4af08fef-0f95-4448-bd66-b84589609611\") " pod="openstack/keystone-1dd0-account-create-update-26v9f"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.532828 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3710c5e-0042-4f48-861d-c793cf81e42f-operator-scripts\") pod \"keystone-db-create-nxzgl\" (UID: \"c3710c5e-0042-4f48-861d-c793cf81e42f\") " pod="openstack/keystone-db-create-nxzgl"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.597154 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.597211 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.634337 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57l94\" (UniqueName: \"kubernetes.io/projected/c3710c5e-0042-4f48-861d-c793cf81e42f-kube-api-access-57l94\") pod \"keystone-db-create-nxzgl\" (UID: \"c3710c5e-0042-4f48-861d-c793cf81e42f\") " pod="openstack/keystone-db-create-nxzgl"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.634752 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dxn2\" (UniqueName: \"kubernetes.io/projected/4af08fef-0f95-4448-bd66-b84589609611-kube-api-access-4dxn2\") pod \"keystone-1dd0-account-create-update-26v9f\" (UID: \"4af08fef-0f95-4448-bd66-b84589609611\") " pod="openstack/keystone-1dd0-account-create-update-26v9f"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.634783 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4af08fef-0f95-4448-bd66-b84589609611-operator-scripts\") pod \"keystone-1dd0-account-create-update-26v9f\" (UID: \"4af08fef-0f95-4448-bd66-b84589609611\") " pod="openstack/keystone-1dd0-account-create-update-26v9f"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.634845 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3710c5e-0042-4f48-861d-c793cf81e42f-operator-scripts\") pod \"keystone-db-create-nxzgl\" (UID: \"c3710c5e-0042-4f48-861d-c793cf81e42f\") " pod="openstack/keystone-db-create-nxzgl"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.635779 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3710c5e-0042-4f48-861d-c793cf81e42f-operator-scripts\") pod \"keystone-db-create-nxzgl\" (UID: \"c3710c5e-0042-4f48-861d-c793cf81e42f\") " pod="openstack/keystone-db-create-nxzgl"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.635825 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4af08fef-0f95-4448-bd66-b84589609611-operator-scripts\") pod \"keystone-1dd0-account-create-update-26v9f\" (UID: \"4af08fef-0f95-4448-bd66-b84589609611\") " pod="openstack/keystone-1dd0-account-create-update-26v9f"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.657779 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57l94\" (UniqueName: \"kubernetes.io/projected/c3710c5e-0042-4f48-861d-c793cf81e42f-kube-api-access-57l94\") pod \"keystone-db-create-nxzgl\" (UID: \"c3710c5e-0042-4f48-861d-c793cf81e42f\") " pod="openstack/keystone-db-create-nxzgl"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.658224 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dxn2\" (UniqueName: \"kubernetes.io/projected/4af08fef-0f95-4448-bd66-b84589609611-kube-api-access-4dxn2\") pod \"keystone-1dd0-account-create-update-26v9f\" (UID: \"4af08fef-0f95-4448-bd66-b84589609611\") " pod="openstack/keystone-1dd0-account-create-update-26v9f"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.674662 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-49xxd"]
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.675789 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-49xxd"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.694722 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-49xxd"]
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.738646 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jd8lt\" (UniqueName: \"kubernetes.io/projected/a0936b89-101b-4e57-81e5-756361104037-kube-api-access-jd8lt\") pod \"placement-db-create-49xxd\" (UID: \"a0936b89-101b-4e57-81e5-756361104037\") " pod="openstack/placement-db-create-49xxd"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.739087 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0936b89-101b-4e57-81e5-756361104037-operator-scripts\") pod \"placement-db-create-49xxd\" (UID: \"a0936b89-101b-4e57-81e5-756361104037\") " pod="openstack/placement-db-create-49xxd"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.758717 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-1dd0-account-create-update-26v9f"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.807305 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-nxzgl"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.840970 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0936b89-101b-4e57-81e5-756361104037-operator-scripts\") pod \"placement-db-create-49xxd\" (UID: \"a0936b89-101b-4e57-81e5-756361104037\") " pod="openstack/placement-db-create-49xxd"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.841092 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jd8lt\" (UniqueName: \"kubernetes.io/projected/a0936b89-101b-4e57-81e5-756361104037-kube-api-access-jd8lt\") pod \"placement-db-create-49xxd\" (UID: \"a0936b89-101b-4e57-81e5-756361104037\") " pod="openstack/placement-db-create-49xxd"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.842540 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0936b89-101b-4e57-81e5-756361104037-operator-scripts\") pod \"placement-db-create-49xxd\" (UID: \"a0936b89-101b-4e57-81e5-756361104037\") " pod="openstack/placement-db-create-49xxd"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.845490 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5ecb-account-create-update-t2kgc"]
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.846966 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5ecb-account-create-update-t2kgc"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.851459 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.856810 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5ecb-account-create-update-t2kgc"]
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.883818 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jd8lt\" (UniqueName: \"kubernetes.io/projected/a0936b89-101b-4e57-81e5-756361104037-kube-api-access-jd8lt\") pod \"placement-db-create-49xxd\" (UID: \"a0936b89-101b-4e57-81e5-756361104037\") " pod="openstack/placement-db-create-49xxd"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.954570 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-5rlq6"]
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.956171 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5rlq6"
Dec 11 10:55:14 crc kubenswrapper[5016]: I1211 10:55:14.971057 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5rlq6"]
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.043837 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-9c93-account-create-update-kwv2t"]
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.045061 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-9c93-account-create-update-kwv2t"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.055035 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.055485 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-49xxd"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.065872 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e23b5f15-b825-4869-a7f8-93a8c60a090a-operator-scripts\") pod \"glance-9c93-account-create-update-kwv2t\" (UID: \"e23b5f15-b825-4869-a7f8-93a8c60a090a\") " pod="openstack/glance-9c93-account-create-update-kwv2t"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.065922 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e6d363c-70c9-4659-aa69-dc91e3f86e07-operator-scripts\") pod \"glance-db-create-5rlq6\" (UID: \"5e6d363c-70c9-4659-aa69-dc91e3f86e07\") " pod="openstack/glance-db-create-5rlq6"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.066576 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzmc5\" (UniqueName: \"kubernetes.io/projected/c798030e-6efe-4b81-9d60-96dc199e420a-kube-api-access-bzmc5\") pod \"placement-5ecb-account-create-update-t2kgc\" (UID: \"c798030e-6efe-4b81-9d60-96dc199e420a\") " pod="openstack/placement-5ecb-account-create-update-t2kgc"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.066648 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c798030e-6efe-4b81-9d60-96dc199e420a-operator-scripts\") pod \"placement-5ecb-account-create-update-t2kgc\" (UID: \"c798030e-6efe-4b81-9d60-96dc199e420a\") " pod="openstack/placement-5ecb-account-create-update-t2kgc"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.066700 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g747b\" (UniqueName: \"kubernetes.io/projected/5e6d363c-70c9-4659-aa69-dc91e3f86e07-kube-api-access-g747b\") pod \"glance-db-create-5rlq6\" (UID: \"5e6d363c-70c9-4659-aa69-dc91e3f86e07\") " pod="openstack/glance-db-create-5rlq6"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.066749 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdk4m\" (UniqueName: \"kubernetes.io/projected/e23b5f15-b825-4869-a7f8-93a8c60a090a-kube-api-access-wdk4m\") pod \"glance-9c93-account-create-update-kwv2t\" (UID: \"e23b5f15-b825-4869-a7f8-93a8c60a090a\") " pod="openstack/glance-9c93-account-create-update-kwv2t"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.078483 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-9c93-account-create-update-kwv2t"]
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.168620 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzmc5\" (UniqueName: \"kubernetes.io/projected/c798030e-6efe-4b81-9d60-96dc199e420a-kube-api-access-bzmc5\") pod \"placement-5ecb-account-create-update-t2kgc\" (UID: \"c798030e-6efe-4b81-9d60-96dc199e420a\") " pod="openstack/placement-5ecb-account-create-update-t2kgc"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.168710 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c798030e-6efe-4b81-9d60-96dc199e420a-operator-scripts\") pod \"placement-5ecb-account-create-update-t2kgc\" (UID: \"c798030e-6efe-4b81-9d60-96dc199e420a\") " pod="openstack/placement-5ecb-account-create-update-t2kgc"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.168747 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g747b\" (UniqueName: \"kubernetes.io/projected/5e6d363c-70c9-4659-aa69-dc91e3f86e07-kube-api-access-g747b\") pod \"glance-db-create-5rlq6\" (UID: \"5e6d363c-70c9-4659-aa69-dc91e3f86e07\") " pod="openstack/glance-db-create-5rlq6"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.168798 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdk4m\" (UniqueName: \"kubernetes.io/projected/e23b5f15-b825-4869-a7f8-93a8c60a090a-kube-api-access-wdk4m\") pod \"glance-9c93-account-create-update-kwv2t\" (UID: \"e23b5f15-b825-4869-a7f8-93a8c60a090a\") " pod="openstack/glance-9c93-account-create-update-kwv2t"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.169131 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e23b5f15-b825-4869-a7f8-93a8c60a090a-operator-scripts\") pod \"glance-9c93-account-create-update-kwv2t\" (UID: \"e23b5f15-b825-4869-a7f8-93a8c60a090a\") " pod="openstack/glance-9c93-account-create-update-kwv2t"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.169167 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e6d363c-70c9-4659-aa69-dc91e3f86e07-operator-scripts\") pod \"glance-db-create-5rlq6\" (UID: \"5e6d363c-70c9-4659-aa69-dc91e3f86e07\") " pod="openstack/glance-db-create-5rlq6"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.170001 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e6d363c-70c9-4659-aa69-dc91e3f86e07-operator-scripts\") pod \"glance-db-create-5rlq6\" (UID: \"5e6d363c-70c9-4659-aa69-dc91e3f86e07\") " pod="openstack/glance-db-create-5rlq6"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.170487 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e23b5f15-b825-4869-a7f8-93a8c60a090a-operator-scripts\") pod \"glance-9c93-account-create-update-kwv2t\" (UID: \"e23b5f15-b825-4869-a7f8-93a8c60a090a\") " pod="openstack/glance-9c93-account-create-update-kwv2t"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.174624 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c798030e-6efe-4b81-9d60-96dc199e420a-operator-scripts\") pod \"placement-5ecb-account-create-update-t2kgc\" (UID: \"c798030e-6efe-4b81-9d60-96dc199e420a\") " pod="openstack/placement-5ecb-account-create-update-t2kgc"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.196527 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdk4m\" (UniqueName: \"kubernetes.io/projected/e23b5f15-b825-4869-a7f8-93a8c60a090a-kube-api-access-wdk4m\") pod \"glance-9c93-account-create-update-kwv2t\" (UID: \"e23b5f15-b825-4869-a7f8-93a8c60a090a\") " pod="openstack/glance-9c93-account-create-update-kwv2t"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.196908 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzmc5\" (UniqueName: \"kubernetes.io/projected/c798030e-6efe-4b81-9d60-96dc199e420a-kube-api-access-bzmc5\") pod \"placement-5ecb-account-create-update-t2kgc\" (UID: \"c798030e-6efe-4b81-9d60-96dc199e420a\") " pod="openstack/placement-5ecb-account-create-update-t2kgc"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.199142 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g747b\" (UniqueName: \"kubernetes.io/projected/5e6d363c-70c9-4659-aa69-dc91e3f86e07-kube-api-access-g747b\") pod \"glance-db-create-5rlq6\" (UID: \"5e6d363c-70c9-4659-aa69-dc91e3f86e07\") " pod="openstack/glance-db-create-5rlq6"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.256547 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5ecb-account-create-update-t2kgc"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.305876 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5rlq6"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.386399 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-nxzgl"]
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.395915 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-9c93-account-create-update-kwv2t"
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.445256 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-1dd0-account-create-update-26v9f"]
Dec 11 10:55:15 crc kubenswrapper[5016]: W1211 10:55:15.487523 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4af08fef_0f95_4448_bd66_b84589609611.slice/crio-f5a1848e69fc577ffc144fe3b47e84be408fec686a8fbe61e654240fc05ff35b WatchSource:0}: Error finding container f5a1848e69fc577ffc144fe3b47e84be408fec686a8fbe61e654240fc05ff35b: Status 404 returned error can't find the container with id f5a1848e69fc577ffc144fe3b47e84be408fec686a8fbe61e654240fc05ff35b
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.622382 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-49xxd"]
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.689276 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5ecb-account-create-update-t2kgc"]
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.707542 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0"
Dec 11 10:55:15 crc kubenswrapper[5016]: E1211 10:55:15.707803 5016 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Dec 11 10:55:15 crc kubenswrapper[5016]: E1211 10:55:15.707821 5016 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Dec 11 10:55:15 crc kubenswrapper[5016]: E1211 10:55:15.707870 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift podName:a24f6c06-a757-4b4b-9361-e87f07af2ca8 nodeName:}" failed. No retries permitted until 2025-12-11 10:55:23.707853072 +0000 UTC m=+1240.526412641 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift") pod "swift-storage-0" (UID: "a24f6c06-a757-4b4b-9361-e87f07af2ca8") : configmap "swift-ring-files" not found
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.911451 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-nxzgl" event={"ID":"c3710c5e-0042-4f48-861d-c793cf81e42f","Type":"ContainerStarted","Data":"6484b6e8a338ded84a4f41b9449bdeb9f5c10162f3decd73121d4a940e95656f"}
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.913809 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-49xxd" event={"ID":"a0936b89-101b-4e57-81e5-756361104037","Type":"ContainerStarted","Data":"1b04e658112f5c91cee56d90bf95eb72e7a218486bc6a75f7e02c78348f2ae7c"}
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.915503 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5ecb-account-create-update-t2kgc" event={"ID":"c798030e-6efe-4b81-9d60-96dc199e420a","Type":"ContainerStarted","Data":"44ed259723088be2015030f7e5a70b57ccac584c000b5add80f8fc39333fcee4"}
Dec 11 10:55:15 crc kubenswrapper[5016]: I1211 10:55:15.916655 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-1dd0-account-create-update-26v9f" event={"ID":"4af08fef-0f95-4448-bd66-b84589609611","Type":"ContainerStarted","Data":"f5a1848e69fc577ffc144fe3b47e84be408fec686a8fbe61e654240fc05ff35b"}
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.054204 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5rlq6"]
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.225114 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-9c93-account-create-update-kwv2t"]
Dec 11 10:55:16 crc kubenswrapper[5016]: W1211 10:55:16.235244 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode23b5f15_b825_4869_a7f8_93a8c60a090a.slice/crio-2aabdb16619e6b2d3a7f256727725496bb831ad8eb1194f9edc069591ddac49f WatchSource:0}: Error finding container 2aabdb16619e6b2d3a7f256727725496bb831ad8eb1194f9edc069591ddac49f: Status 404 returned error can't find the container with id 2aabdb16619e6b2d3a7f256727725496bb831ad8eb1194f9edc069591ddac49f
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.930160 5016 generic.go:334] "Generic (PLEG): container finished" podID="c798030e-6efe-4b81-9d60-96dc199e420a" containerID="0a7cba47a64ef55b5c9266c0d7a7d17a143ec05d43653f9358774cfbe7ecf486" exitCode=0
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.930302 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5ecb-account-create-update-t2kgc" event={"ID":"c798030e-6efe-4b81-9d60-96dc199e420a","Type":"ContainerDied","Data":"0a7cba47a64ef55b5c9266c0d7a7d17a143ec05d43653f9358774cfbe7ecf486"}
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.932655 5016 generic.go:334] "Generic (PLEG): container finished" podID="4af08fef-0f95-4448-bd66-b84589609611" containerID="9e1d76e1b7fd2318bec5c37411dc7c109345d076eab39700173eba995a3e2fa1" exitCode=0
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.932706 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-1dd0-account-create-update-26v9f" event={"ID":"4af08fef-0f95-4448-bd66-b84589609611","Type":"ContainerDied","Data":"9e1d76e1b7fd2318bec5c37411dc7c109345d076eab39700173eba995a3e2fa1"}
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.935644 5016 generic.go:334] "Generic (PLEG): container finished" podID="e23b5f15-b825-4869-a7f8-93a8c60a090a" containerID="b5fab13e16c9ffe62925482318c215853371e60c6caf2f2784ca737de1ab7623" exitCode=0
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.935894 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-9c93-account-create-update-kwv2t" event={"ID":"e23b5f15-b825-4869-a7f8-93a8c60a090a","Type":"ContainerDied","Data":"b5fab13e16c9ffe62925482318c215853371e60c6caf2f2784ca737de1ab7623"}
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.935966 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-9c93-account-create-update-kwv2t" event={"ID":"e23b5f15-b825-4869-a7f8-93a8c60a090a","Type":"ContainerStarted","Data":"2aabdb16619e6b2d3a7f256727725496bb831ad8eb1194f9edc069591ddac49f"}
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.938924 5016 generic.go:334] "Generic (PLEG): container finished" podID="c3710c5e-0042-4f48-861d-c793cf81e42f" containerID="63ae5ae9a4fa7469c6fd35fc38346f5fbf4809490a5e36a0a9b08c9e7ace6c77" exitCode=0
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.938995 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-nxzgl" event={"ID":"c3710c5e-0042-4f48-861d-c793cf81e42f","Type":"ContainerDied","Data":"63ae5ae9a4fa7469c6fd35fc38346f5fbf4809490a5e36a0a9b08c9e7ace6c77"}
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.941403 5016 generic.go:334] "Generic (PLEG): container finished" podID="5e6d363c-70c9-4659-aa69-dc91e3f86e07" containerID="7a1667836ae9c494ac94ebe9a9c8a1484d54dc711893f846cb233edf5aea2e59" exitCode=0
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.941460 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5rlq6" event={"ID":"5e6d363c-70c9-4659-aa69-dc91e3f86e07","Type":"ContainerDied","Data":"7a1667836ae9c494ac94ebe9a9c8a1484d54dc711893f846cb233edf5aea2e59"}
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.941504 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5rlq6" event={"ID":"5e6d363c-70c9-4659-aa69-dc91e3f86e07","Type":"ContainerStarted","Data":"cb2e77890a19e7eb6ab1992a7b74b301d08a783707b8fdbc168479ff0de2fe8e"}
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.948169 5016 generic.go:334] "Generic (PLEG): container finished" podID="a0936b89-101b-4e57-81e5-756361104037" containerID="16c6e2c392d039de765be94a80fa7860a850e61ecb31c47031e683a998e7787b" exitCode=0
Dec 11 10:55:16 crc kubenswrapper[5016]: I1211 10:55:16.948244 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-49xxd" event={"ID":"a0936b89-101b-4e57-81e5-756361104037","Type":"ContainerDied","Data":"16c6e2c392d039de765be94a80fa7860a850e61ecb31c47031e683a998e7787b"}
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.136193 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw"
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.200719 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-zvj76"]
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.203232 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-zvj76" podUID="2f9cf295-ec3d-484a-b0ba-9577386e8ef1" containerName="dnsmasq-dns" containerID="cri-o://4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236" gracePeriod=10
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.744407 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-zvj76"
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.848518 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-dns-svc\") pod \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") "
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.848578 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-ovsdbserver-sb\") pod \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") "
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.848659 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-config\") pod \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") "
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.848868 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2t2kq\" (UniqueName: \"kubernetes.io/projected/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-kube-api-access-2t2kq\") pod \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") "
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.848933 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-ovsdbserver-nb\") pod \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\" (UID: \"2f9cf295-ec3d-484a-b0ba-9577386e8ef1\") "
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.871767 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-kube-api-access-2t2kq" (OuterVolumeSpecName: "kube-api-access-2t2kq") pod "2f9cf295-ec3d-484a-b0ba-9577386e8ef1" (UID: "2f9cf295-ec3d-484a-b0ba-9577386e8ef1"). InnerVolumeSpecName "kube-api-access-2t2kq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.905486 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2f9cf295-ec3d-484a-b0ba-9577386e8ef1" (UID: "2f9cf295-ec3d-484a-b0ba-9577386e8ef1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.910450 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2f9cf295-ec3d-484a-b0ba-9577386e8ef1" (UID: "2f9cf295-ec3d-484a-b0ba-9577386e8ef1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.911784 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-config" (OuterVolumeSpecName: "config") pod "2f9cf295-ec3d-484a-b0ba-9577386e8ef1" (UID: "2f9cf295-ec3d-484a-b0ba-9577386e8ef1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.916742 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2f9cf295-ec3d-484a-b0ba-9577386e8ef1" (UID: "2f9cf295-ec3d-484a-b0ba-9577386e8ef1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.951823 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-config\") on node \"crc\" DevicePath \"\""
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.952379 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2t2kq\" (UniqueName: \"kubernetes.io/projected/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-kube-api-access-2t2kq\") on node \"crc\" DevicePath \"\""
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.952400 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.952417 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.952434 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f9cf295-ec3d-484a-b0ba-9577386e8ef1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.967370 5016 generic.go:334] "Generic (PLEG): container finished" podID="2f9cf295-ec3d-484a-b0ba-9577386e8ef1" containerID="4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236" exitCode=0
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.967547 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-zvj76"
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.967622 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-zvj76" event={"ID":"2f9cf295-ec3d-484a-b0ba-9577386e8ef1","Type":"ContainerDied","Data":"4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236"}
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.967693 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-zvj76" event={"ID":"2f9cf295-ec3d-484a-b0ba-9577386e8ef1","Type":"ContainerDied","Data":"b29dd600993d56aca9f36d9392eb7339355ace5ecae289d0136ef31b5fdf24d2"}
Dec 11 10:55:17 crc kubenswrapper[5016]: I1211 10:55:17.967726 5016 scope.go:117] "RemoveContainer" containerID="4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236"
Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.012685 5016 scope.go:117] "RemoveContainer" containerID="e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1"
Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.025727 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-zvj76"]
Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.039869 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-zvj76"]
Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.070966 5016 scope.go:117] "RemoveContainer" containerID="4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236"
Dec 11 10:55:18 crc kubenswrapper[5016]: E1211 10:55:18.071343 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236\": container with ID starting with 4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236 not found: ID does not exist" containerID="4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236"
Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.071380 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236"} err="failed to get container status \"4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236\": rpc error: code = NotFound desc = could not find container \"4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236\": container with ID starting with 4be86d03e52de85bccc23f0ecd8138b3fd532f3f8224095aceb47e4ce8cba236 not found: ID does not exist"
Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.071401 5016 scope.go:117] "RemoveContainer" containerID="e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1"
Dec 11 10:55:18 crc kubenswrapper[5016]: E1211 10:55:18.071695 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1\": container with ID starting with e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1 not found: ID does not exist" containerID="e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1"
Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.071722 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1"} err="failed to get container status \"e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1\": rpc error: code = NotFound desc = could not find container \"e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1\": container with ID starting with e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1 not found: ID does not exist"
\"e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1\": rpc error: code = NotFound desc = could not find container \"e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1\": container with ID starting with e87befc630794a9bb6e23c38d3f06672c0274975ed8a92ffa0d1e007452af8d1 not found: ID does not exist" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.409222 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-nxzgl" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.463554 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-57l94\" (UniqueName: \"kubernetes.io/projected/c3710c5e-0042-4f48-861d-c793cf81e42f-kube-api-access-57l94\") pod \"c3710c5e-0042-4f48-861d-c793cf81e42f\" (UID: \"c3710c5e-0042-4f48-861d-c793cf81e42f\") " Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.464010 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3710c5e-0042-4f48-861d-c793cf81e42f-operator-scripts\") pod \"c3710c5e-0042-4f48-861d-c793cf81e42f\" (UID: \"c3710c5e-0042-4f48-861d-c793cf81e42f\") " Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.464517 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3710c5e-0042-4f48-861d-c793cf81e42f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c3710c5e-0042-4f48-861d-c793cf81e42f" (UID: "c3710c5e-0042-4f48-861d-c793cf81e42f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.468251 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3710c5e-0042-4f48-861d-c793cf81e42f-kube-api-access-57l94" (OuterVolumeSpecName: "kube-api-access-57l94") pod "c3710c5e-0042-4f48-861d-c793cf81e42f" (UID: "c3710c5e-0042-4f48-861d-c793cf81e42f"). InnerVolumeSpecName "kube-api-access-57l94". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.566297 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3710c5e-0042-4f48-861d-c793cf81e42f-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.566336 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-57l94\" (UniqueName: \"kubernetes.io/projected/c3710c5e-0042-4f48-861d-c793cf81e42f-kube-api-access-57l94\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.630334 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5ecb-account-create-update-t2kgc" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.643567 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-9c93-account-create-update-kwv2t" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.664121 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-1dd0-account-create-update-26v9f" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.666986 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c798030e-6efe-4b81-9d60-96dc199e420a-operator-scripts\") pod \"c798030e-6efe-4b81-9d60-96dc199e420a\" (UID: \"c798030e-6efe-4b81-9d60-96dc199e420a\") " Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.667103 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzmc5\" (UniqueName: \"kubernetes.io/projected/c798030e-6efe-4b81-9d60-96dc199e420a-kube-api-access-bzmc5\") pod \"c798030e-6efe-4b81-9d60-96dc199e420a\" (UID: \"c798030e-6efe-4b81-9d60-96dc199e420a\") " Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.667225 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e23b5f15-b825-4869-a7f8-93a8c60a090a-operator-scripts\") pod \"e23b5f15-b825-4869-a7f8-93a8c60a090a\" (UID: \"e23b5f15-b825-4869-a7f8-93a8c60a090a\") " Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.667312 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdk4m\" (UniqueName: \"kubernetes.io/projected/e23b5f15-b825-4869-a7f8-93a8c60a090a-kube-api-access-wdk4m\") pod \"e23b5f15-b825-4869-a7f8-93a8c60a090a\" (UID: \"e23b5f15-b825-4869-a7f8-93a8c60a090a\") " Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.669362 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e23b5f15-b825-4869-a7f8-93a8c60a090a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e23b5f15-b825-4869-a7f8-93a8c60a090a" (UID: "e23b5f15-b825-4869-a7f8-93a8c60a090a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.669692 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c798030e-6efe-4b81-9d60-96dc199e420a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c798030e-6efe-4b81-9d60-96dc199e420a" (UID: "c798030e-6efe-4b81-9d60-96dc199e420a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.672856 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e23b5f15-b825-4869-a7f8-93a8c60a090a-kube-api-access-wdk4m" (OuterVolumeSpecName: "kube-api-access-wdk4m") pod "e23b5f15-b825-4869-a7f8-93a8c60a090a" (UID: "e23b5f15-b825-4869-a7f8-93a8c60a090a"). InnerVolumeSpecName "kube-api-access-wdk4m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.685142 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c798030e-6efe-4b81-9d60-96dc199e420a-kube-api-access-bzmc5" (OuterVolumeSpecName: "kube-api-access-bzmc5") pod "c798030e-6efe-4b81-9d60-96dc199e420a" (UID: "c798030e-6efe-4b81-9d60-96dc199e420a"). InnerVolumeSpecName "kube-api-access-bzmc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.718320 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-49xxd" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.723981 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5rlq6" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.770267 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jd8lt\" (UniqueName: \"kubernetes.io/projected/a0936b89-101b-4e57-81e5-756361104037-kube-api-access-jd8lt\") pod \"a0936b89-101b-4e57-81e5-756361104037\" (UID: \"a0936b89-101b-4e57-81e5-756361104037\") " Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.770317 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g747b\" (UniqueName: \"kubernetes.io/projected/5e6d363c-70c9-4659-aa69-dc91e3f86e07-kube-api-access-g747b\") pod \"5e6d363c-70c9-4659-aa69-dc91e3f86e07\" (UID: \"5e6d363c-70c9-4659-aa69-dc91e3f86e07\") " Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.770358 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0936b89-101b-4e57-81e5-756361104037-operator-scripts\") pod \"a0936b89-101b-4e57-81e5-756361104037\" (UID: \"a0936b89-101b-4e57-81e5-756361104037\") " Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.770443 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4af08fef-0f95-4448-bd66-b84589609611-operator-scripts\") pod \"4af08fef-0f95-4448-bd66-b84589609611\" (UID: \"4af08fef-0f95-4448-bd66-b84589609611\") " Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.770510 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dxn2\" (UniqueName: \"kubernetes.io/projected/4af08fef-0f95-4448-bd66-b84589609611-kube-api-access-4dxn2\") pod \"4af08fef-0f95-4448-bd66-b84589609611\" (UID: \"4af08fef-0f95-4448-bd66-b84589609611\") " Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.770552 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e6d363c-70c9-4659-aa69-dc91e3f86e07-operator-scripts\") pod \"5e6d363c-70c9-4659-aa69-dc91e3f86e07\" (UID: \"5e6d363c-70c9-4659-aa69-dc91e3f86e07\") " Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.771110 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdk4m\" (UniqueName: \"kubernetes.io/projected/e23b5f15-b825-4869-a7f8-93a8c60a090a-kube-api-access-wdk4m\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.771138 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c798030e-6efe-4b81-9d60-96dc199e420a-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.771153 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzmc5\" (UniqueName: \"kubernetes.io/projected/c798030e-6efe-4b81-9d60-96dc199e420a-kube-api-access-bzmc5\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.771167 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e23b5f15-b825-4869-a7f8-93a8c60a090a-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:18 
crc kubenswrapper[5016]: I1211 10:55:18.771466 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0936b89-101b-4e57-81e5-756361104037-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a0936b89-101b-4e57-81e5-756361104037" (UID: "a0936b89-101b-4e57-81e5-756361104037"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.771673 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e6d363c-70c9-4659-aa69-dc91e3f86e07-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5e6d363c-70c9-4659-aa69-dc91e3f86e07" (UID: "5e6d363c-70c9-4659-aa69-dc91e3f86e07"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.772196 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4af08fef-0f95-4448-bd66-b84589609611-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4af08fef-0f95-4448-bd66-b84589609611" (UID: "4af08fef-0f95-4448-bd66-b84589609611"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.776214 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0936b89-101b-4e57-81e5-756361104037-kube-api-access-jd8lt" (OuterVolumeSpecName: "kube-api-access-jd8lt") pod "a0936b89-101b-4e57-81e5-756361104037" (UID: "a0936b89-101b-4e57-81e5-756361104037"). InnerVolumeSpecName "kube-api-access-jd8lt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.779274 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4af08fef-0f95-4448-bd66-b84589609611-kube-api-access-4dxn2" (OuterVolumeSpecName: "kube-api-access-4dxn2") pod "4af08fef-0f95-4448-bd66-b84589609611" (UID: "4af08fef-0f95-4448-bd66-b84589609611"). InnerVolumeSpecName "kube-api-access-4dxn2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.779402 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e6d363c-70c9-4659-aa69-dc91e3f86e07-kube-api-access-g747b" (OuterVolumeSpecName: "kube-api-access-g747b") pod "5e6d363c-70c9-4659-aa69-dc91e3f86e07" (UID: "5e6d363c-70c9-4659-aa69-dc91e3f86e07"). InnerVolumeSpecName "kube-api-access-g747b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.874509 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jd8lt\" (UniqueName: \"kubernetes.io/projected/a0936b89-101b-4e57-81e5-756361104037-kube-api-access-jd8lt\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.874567 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g747b\" (UniqueName: \"kubernetes.io/projected/5e6d363c-70c9-4659-aa69-dc91e3f86e07-kube-api-access-g747b\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.874588 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0936b89-101b-4e57-81e5-756361104037-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.874600 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4af08fef-0f95-4448-bd66-b84589609611-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.874612 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4dxn2\" (UniqueName: \"kubernetes.io/projected/4af08fef-0f95-4448-bd66-b84589609611-kube-api-access-4dxn2\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:18 crc kubenswrapper[5016]: I1211 10:55:18.874624 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e6d363c-70c9-4659-aa69-dc91e3f86e07-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.000183 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5rlq6" event={"ID":"5e6d363c-70c9-4659-aa69-dc91e3f86e07","Type":"ContainerDied","Data":"cb2e77890a19e7eb6ab1992a7b74b301d08a783707b8fdbc168479ff0de2fe8e"} Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.000514 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb2e77890a19e7eb6ab1992a7b74b301d08a783707b8fdbc168479ff0de2fe8e" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.000596 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5rlq6" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.032282 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-49xxd" event={"ID":"a0936b89-101b-4e57-81e5-756361104037","Type":"ContainerDied","Data":"1b04e658112f5c91cee56d90bf95eb72e7a218486bc6a75f7e02c78348f2ae7c"} Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.032332 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b04e658112f5c91cee56d90bf95eb72e7a218486bc6a75f7e02c78348f2ae7c" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.032416 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-49xxd" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.048423 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5ecb-account-create-update-t2kgc" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.048618 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5ecb-account-create-update-t2kgc" event={"ID":"c798030e-6efe-4b81-9d60-96dc199e420a","Type":"ContainerDied","Data":"44ed259723088be2015030f7e5a70b57ccac584c000b5add80f8fc39333fcee4"} Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.048693 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="44ed259723088be2015030f7e5a70b57ccac584c000b5add80f8fc39333fcee4" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.058511 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-1dd0-account-create-update-26v9f" event={"ID":"4af08fef-0f95-4448-bd66-b84589609611","Type":"ContainerDied","Data":"f5a1848e69fc577ffc144fe3b47e84be408fec686a8fbe61e654240fc05ff35b"} Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.058568 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5a1848e69fc577ffc144fe3b47e84be408fec686a8fbe61e654240fc05ff35b" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.058671 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-1dd0-account-create-update-26v9f" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.079319 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-9c93-account-create-update-kwv2t" event={"ID":"e23b5f15-b825-4869-a7f8-93a8c60a090a","Type":"ContainerDied","Data":"2aabdb16619e6b2d3a7f256727725496bb831ad8eb1194f9edc069591ddac49f"} Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.079361 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2aabdb16619e6b2d3a7f256727725496bb831ad8eb1194f9edc069591ddac49f" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.079446 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-9c93-account-create-update-kwv2t" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.100538 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-nxzgl" event={"ID":"c3710c5e-0042-4f48-861d-c793cf81e42f","Type":"ContainerDied","Data":"6484b6e8a338ded84a4f41b9449bdeb9f5c10162f3decd73121d4a940e95656f"} Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.100573 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-nxzgl" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.100576 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6484b6e8a338ded84a4f41b9449bdeb9f5c10162f3decd73121d4a940e95656f" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.490104 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f9cf295-ec3d-484a-b0ba-9577386e8ef1" path="/var/lib/kubelet/pods/2f9cf295-ec3d-484a-b0ba-9577386e8ef1/volumes" Dec 11 10:55:19 crc kubenswrapper[5016]: I1211 10:55:19.630079 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.217158 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-7jsb2"] Dec 11 10:55:20 crc kubenswrapper[5016]: E1211 10:55:20.219131 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4af08fef-0f95-4448-bd66-b84589609611" containerName="mariadb-account-create-update" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219165 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="4af08fef-0f95-4448-bd66-b84589609611" containerName="mariadb-account-create-update" Dec 11 10:55:20 crc kubenswrapper[5016]: E1211 10:55:20.219180 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e6d363c-70c9-4659-aa69-dc91e3f86e07" containerName="mariadb-database-create" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219190 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e6d363c-70c9-4659-aa69-dc91e3f86e07" containerName="mariadb-database-create" Dec 11 10:55:20 crc kubenswrapper[5016]: E1211 10:55:20.219209 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f9cf295-ec3d-484a-b0ba-9577386e8ef1" containerName="dnsmasq-dns" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219217 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f9cf295-ec3d-484a-b0ba-9577386e8ef1" containerName="dnsmasq-dns" Dec 11 10:55:20 crc kubenswrapper[5016]: E1211 10:55:20.219234 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3710c5e-0042-4f48-861d-c793cf81e42f" containerName="mariadb-database-create" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219243 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3710c5e-0042-4f48-861d-c793cf81e42f" containerName="mariadb-database-create" Dec 11 10:55:20 crc kubenswrapper[5016]: E1211 10:55:20.219258 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f9cf295-ec3d-484a-b0ba-9577386e8ef1" containerName="init" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219265 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f9cf295-ec3d-484a-b0ba-9577386e8ef1" containerName="init" Dec 11 10:55:20 crc kubenswrapper[5016]: E1211 10:55:20.219289 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0936b89-101b-4e57-81e5-756361104037" containerName="mariadb-database-create" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219298 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0936b89-101b-4e57-81e5-756361104037" containerName="mariadb-database-create" Dec 11 10:55:20 crc kubenswrapper[5016]: E1211 10:55:20.219323 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c798030e-6efe-4b81-9d60-96dc199e420a" containerName="mariadb-account-create-update" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 
10:55:20.219331 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="c798030e-6efe-4b81-9d60-96dc199e420a" containerName="mariadb-account-create-update" Dec 11 10:55:20 crc kubenswrapper[5016]: E1211 10:55:20.219346 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e23b5f15-b825-4869-a7f8-93a8c60a090a" containerName="mariadb-account-create-update" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219355 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e23b5f15-b825-4869-a7f8-93a8c60a090a" containerName="mariadb-account-create-update" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219567 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="c798030e-6efe-4b81-9d60-96dc199e420a" containerName="mariadb-account-create-update" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219603 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3710c5e-0042-4f48-861d-c793cf81e42f" containerName="mariadb-database-create" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219634 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="e23b5f15-b825-4869-a7f8-93a8c60a090a" containerName="mariadb-account-create-update" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219652 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0936b89-101b-4e57-81e5-756361104037" containerName="mariadb-database-create" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219667 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="4af08fef-0f95-4448-bd66-b84589609611" containerName="mariadb-account-create-update" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219684 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f9cf295-ec3d-484a-b0ba-9577386e8ef1" containerName="dnsmasq-dns" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.219707 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e6d363c-70c9-4659-aa69-dc91e3f86e07" containerName="mariadb-database-create" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.220425 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.223677 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.223877 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-wgjtw" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.226438 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-7jsb2"] Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.304352 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-db-sync-config-data\") pod \"glance-db-sync-7jsb2\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.305672 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdhsh\" (UniqueName: \"kubernetes.io/projected/b54c94f2-993f-4595-9878-b14557d8bb18-kube-api-access-tdhsh\") pod \"glance-db-sync-7jsb2\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.305734 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-combined-ca-bundle\") pod \"glance-db-sync-7jsb2\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.305933 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-config-data\") pod \"glance-db-sync-7jsb2\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.407312 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-combined-ca-bundle\") pod \"glance-db-sync-7jsb2\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.407497 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-config-data\") pod \"glance-db-sync-7jsb2\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.408128 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-db-sync-config-data\") pod \"glance-db-sync-7jsb2\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.408203 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdhsh\" (UniqueName: \"kubernetes.io/projected/b54c94f2-993f-4595-9878-b14557d8bb18-kube-api-access-tdhsh\") pod 
\"glance-db-sync-7jsb2\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.411718 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-db-sync-config-data\") pod \"glance-db-sync-7jsb2\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.411735 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-config-data\") pod \"glance-db-sync-7jsb2\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.420142 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-combined-ca-bundle\") pod \"glance-db-sync-7jsb2\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.429851 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdhsh\" (UniqueName: \"kubernetes.io/projected/b54c94f2-993f-4595-9878-b14557d8bb18-kube-api-access-tdhsh\") pod \"glance-db-sync-7jsb2\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.537963 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.748860 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Dec 11 10:55:20 crc kubenswrapper[5016]: I1211 10:55:20.836360 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Dec 11 10:55:21 crc kubenswrapper[5016]: I1211 10:55:21.074447 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-7jsb2"] Dec 11 10:55:21 crc kubenswrapper[5016]: W1211 10:55:21.076881 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb54c94f2_993f_4595_9878_b14557d8bb18.slice/crio-6272d3331dbfc24fe82f1334bb04b6dbd07c610a15e8b9255cd315560618315b WatchSource:0}: Error finding container 6272d3331dbfc24fe82f1334bb04b6dbd07c610a15e8b9255cd315560618315b: Status 404 returned error can't find the container with id 6272d3331dbfc24fe82f1334bb04b6dbd07c610a15e8b9255cd315560618315b Dec 11 10:55:21 crc kubenswrapper[5016]: I1211 10:55:21.126749 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-7jsb2" event={"ID":"b54c94f2-993f-4595-9878-b14557d8bb18","Type":"ContainerStarted","Data":"6272d3331dbfc24fe82f1334bb04b6dbd07c610a15e8b9255cd315560618315b"} Dec 11 10:55:23 crc kubenswrapper[5016]: I1211 10:55:23.794760 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:23 crc kubenswrapper[5016]: E1211 10:55:23.795007 5016 
projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 11 10:55:23 crc kubenswrapper[5016]: E1211 10:55:23.795408 5016 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 11 10:55:23 crc kubenswrapper[5016]: E1211 10:55:23.795468 5016 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift podName:a24f6c06-a757-4b4b-9361-e87f07af2ca8 nodeName:}" failed. No retries permitted until 2025-12-11 10:55:39.795451559 +0000 UTC m=+1256.614011128 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift") pod "swift-storage-0" (UID: "a24f6c06-a757-4b4b-9361-e87f07af2ca8") : configmap "swift-ring-files" not found Dec 11 10:55:26 crc kubenswrapper[5016]: I1211 10:55:26.170450 5016 generic.go:334] "Generic (PLEG): container finished" podID="8d68a71e-cbcb-4ce9-bb01-3b48154074a4" containerID="58a8aee13fa31d487dcf96e04a18fe6d63bd3250ad5f18e19b9bacefac0b430e" exitCode=0 Dec 11 10:55:26 crc kubenswrapper[5016]: I1211 10:55:26.170801 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-cfbpr" event={"ID":"8d68a71e-cbcb-4ce9-bb01-3b48154074a4","Type":"ContainerDied","Data":"58a8aee13fa31d487dcf96e04a18fe6d63bd3250ad5f18e19b9bacefac0b430e"} Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.540984 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.561573 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxprh\" (UniqueName: \"kubernetes.io/projected/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-kube-api-access-bxprh\") pod \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.561616 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-swiftconf\") pod \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.561660 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-dispersionconf\") pod \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.561687 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-ring-data-devices\") pod \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.561814 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-scripts\") pod \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.561847 5016 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-etc-swift\") pod \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.561887 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-combined-ca-bundle\") pod \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\" (UID: \"8d68a71e-cbcb-4ce9-bb01-3b48154074a4\") " Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.563772 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "8d68a71e-cbcb-4ce9-bb01-3b48154074a4" (UID: "8d68a71e-cbcb-4ce9-bb01-3b48154074a4"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.564271 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "8d68a71e-cbcb-4ce9-bb01-3b48154074a4" (UID: "8d68a71e-cbcb-4ce9-bb01-3b48154074a4"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.570269 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-kube-api-access-bxprh" (OuterVolumeSpecName: "kube-api-access-bxprh") pod "8d68a71e-cbcb-4ce9-bb01-3b48154074a4" (UID: "8d68a71e-cbcb-4ce9-bb01-3b48154074a4"). InnerVolumeSpecName "kube-api-access-bxprh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.572741 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "8d68a71e-cbcb-4ce9-bb01-3b48154074a4" (UID: "8d68a71e-cbcb-4ce9-bb01-3b48154074a4"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.591478 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-scripts" (OuterVolumeSpecName: "scripts") pod "8d68a71e-cbcb-4ce9-bb01-3b48154074a4" (UID: "8d68a71e-cbcb-4ce9-bb01-3b48154074a4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.594401 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8d68a71e-cbcb-4ce9-bb01-3b48154074a4" (UID: "8d68a71e-cbcb-4ce9-bb01-3b48154074a4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.600746 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "8d68a71e-cbcb-4ce9-bb01-3b48154074a4" (UID: "8d68a71e-cbcb-4ce9-bb01-3b48154074a4"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.664389 5016 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-swiftconf\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.664425 5016 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-dispersionconf\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.664437 5016 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-ring-data-devices\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.664449 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.664458 5016 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-etc-swift\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.664467 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:27 crc kubenswrapper[5016]: I1211 10:55:27.664476 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxprh\" (UniqueName: \"kubernetes.io/projected/8d68a71e-cbcb-4ce9-bb01-3b48154074a4-kube-api-access-bxprh\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:28 crc kubenswrapper[5016]: I1211 10:55:28.191959 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-cfbpr" event={"ID":"8d68a71e-cbcb-4ce9-bb01-3b48154074a4","Type":"ContainerDied","Data":"f966a10a8f9aa0e7d91bf413e55e67ba3a483207d805e0ffee0b9db93e869a46"} Dec 11 10:55:28 crc kubenswrapper[5016]: I1211 10:55:28.192258 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f966a10a8f9aa0e7d91bf413e55e67ba3a483207d805e0ffee0b9db93e869a46" Dec 11 10:55:28 crc kubenswrapper[5016]: I1211 10:55:28.192325 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-cfbpr" Dec 11 10:55:30 crc kubenswrapper[5016]: I1211 10:55:30.603169 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-g76kk" podUID="cf0694a8-c7ff-429f-a52f-5885a8dcb3ac" containerName="ovn-controller" probeResult="failure" output=< Dec 11 10:55:30 crc kubenswrapper[5016]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 11 10:55:30 crc kubenswrapper[5016]: > Dec 11 10:55:30 crc kubenswrapper[5016]: I1211 10:55:30.716172 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:55:30 crc kubenswrapper[5016]: I1211 10:55:30.747576 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-vbtwd" Dec 11 10:55:30 crc kubenswrapper[5016]: I1211 10:55:30.969605 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-g76kk-config-g2tn6"] Dec 11 10:55:30 crc kubenswrapper[5016]: E1211 10:55:30.969992 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d68a71e-cbcb-4ce9-bb01-3b48154074a4" containerName="swift-ring-rebalance" Dec 11 10:55:30 crc kubenswrapper[5016]: I1211 10:55:30.970005 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d68a71e-cbcb-4ce9-bb01-3b48154074a4" containerName="swift-ring-rebalance" Dec 11 10:55:30 crc kubenswrapper[5016]: I1211 10:55:30.970183 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d68a71e-cbcb-4ce9-bb01-3b48154074a4" containerName="swift-ring-rebalance" Dec 11 10:55:30 crc kubenswrapper[5016]: I1211 10:55:30.970772 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:30 crc kubenswrapper[5016]: I1211 10:55:30.974632 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Dec 11 10:55:30 crc kubenswrapper[5016]: I1211 10:55:30.996554 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-g76kk-config-g2tn6"] Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.024517 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fd113de0-5a6a-49df-86e1-09b732ec9893-additional-scripts\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.025209 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-run\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.025330 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-run-ovn\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.025417 5016 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fd113de0-5a6a-49df-86e1-09b732ec9893-scripts\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.025549 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n62bj\" (UniqueName: \"kubernetes.io/projected/fd113de0-5a6a-49df-86e1-09b732ec9893-kube-api-access-n62bj\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.025688 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-log-ovn\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.127381 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fd113de0-5a6a-49df-86e1-09b732ec9893-additional-scripts\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.127468 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-run\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.127494 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-run-ovn\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.127514 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fd113de0-5a6a-49df-86e1-09b732ec9893-scripts\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.127561 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n62bj\" (UniqueName: \"kubernetes.io/projected/fd113de0-5a6a-49df-86e1-09b732ec9893-kube-api-access-n62bj\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.127599 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-log-ovn\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc 
kubenswrapper[5016]: I1211 10:55:31.127936 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-log-ovn\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.128041 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-run-ovn\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.128094 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-run\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.128842 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fd113de0-5a6a-49df-86e1-09b732ec9893-additional-scripts\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.130117 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fd113de0-5a6a-49df-86e1-09b732ec9893-scripts\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.168021 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n62bj\" (UniqueName: \"kubernetes.io/projected/fd113de0-5a6a-49df-86e1-09b732ec9893-kube-api-access-n62bj\") pod \"ovn-controller-g76kk-config-g2tn6\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:31 crc kubenswrapper[5016]: I1211 10:55:31.296591 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:32 crc kubenswrapper[5016]: I1211 10:55:32.227739 5016 generic.go:334] "Generic (PLEG): container finished" podID="e46a21b8-75eb-49ac-8d08-0acaaa8fac37" containerID="ccccf842dadabad37bad8683166c7169076d2baa22d9ea9bc6e44216e5739d4e" exitCode=0 Dec 11 10:55:32 crc kubenswrapper[5016]: I1211 10:55:32.227837 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e46a21b8-75eb-49ac-8d08-0acaaa8fac37","Type":"ContainerDied","Data":"ccccf842dadabad37bad8683166c7169076d2baa22d9ea9bc6e44216e5739d4e"} Dec 11 10:55:35 crc kubenswrapper[5016]: I1211 10:55:35.602341 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-g76kk" podUID="cf0694a8-c7ff-429f-a52f-5885a8dcb3ac" containerName="ovn-controller" probeResult="failure" output=< Dec 11 10:55:35 crc kubenswrapper[5016]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 11 10:55:35 crc kubenswrapper[5016]: > Dec 11 10:55:38 crc kubenswrapper[5016]: I1211 10:55:38.900157 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-g76kk-config-g2tn6"] Dec 11 10:55:39 crc kubenswrapper[5016]: I1211 10:55:39.288365 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e46a21b8-75eb-49ac-8d08-0acaaa8fac37","Type":"ContainerStarted","Data":"8a88195eb63f082092d6edb63948ca9def10cea29875ffaae2b346bb818ab3dd"} Dec 11 10:55:39 crc kubenswrapper[5016]: I1211 10:55:39.288866 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 11 10:55:39 crc kubenswrapper[5016]: I1211 10:55:39.291091 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-g76kk-config-g2tn6" event={"ID":"fd113de0-5a6a-49df-86e1-09b732ec9893","Type":"ContainerStarted","Data":"8d5da64c815db5f492b48a8f316a82880f20a3ec15a9356aa2b6b239eda2a00b"} Dec 11 10:55:39 crc kubenswrapper[5016]: I1211 10:55:39.293653 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-7jsb2" event={"ID":"b54c94f2-993f-4595-9878-b14557d8bb18","Type":"ContainerStarted","Data":"fa6774202c1e09cb4d25a552c564bacfd6372911d02c77d0ed7c3078238d169b"} Dec 11 10:55:39 crc kubenswrapper[5016]: I1211 10:55:39.318594 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=44.166390384 podStartE2EDuration="1m29.318578244s" podCreationTimestamp="2025-12-11 10:54:10 +0000 UTC" firstStartedPulling="2025-12-11 10:54:13.018154661 +0000 UTC m=+1169.836714240" lastFinishedPulling="2025-12-11 10:54:58.170342521 +0000 UTC m=+1214.988902100" observedRunningTime="2025-12-11 10:55:39.312092195 +0000 UTC m=+1256.130651784" watchObservedRunningTime="2025-12-11 10:55:39.318578244 +0000 UTC m=+1256.137137823" Dec 11 10:55:39 crc kubenswrapper[5016]: I1211 10:55:39.336544 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-7jsb2" podStartSLOduration=1.909786652 podStartE2EDuration="19.336526883s" podCreationTimestamp="2025-12-11 10:55:20 +0000 UTC" firstStartedPulling="2025-12-11 10:55:21.083167827 +0000 UTC m=+1237.901727406" lastFinishedPulling="2025-12-11 10:55:38.509908058 +0000 UTC m=+1255.328467637" observedRunningTime="2025-12-11 10:55:39.33025908 +0000 UTC m=+1256.148818649" watchObservedRunningTime="2025-12-11 10:55:39.336526883 +0000 UTC 
m=+1256.155086462" Dec 11 10:55:39 crc kubenswrapper[5016]: I1211 10:55:39.895905 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:39 crc kubenswrapper[5016]: I1211 10:55:39.902575 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a24f6c06-a757-4b4b-9361-e87f07af2ca8-etc-swift\") pod \"swift-storage-0\" (UID: \"a24f6c06-a757-4b4b-9361-e87f07af2ca8\") " pod="openstack/swift-storage-0" Dec 11 10:55:40 crc kubenswrapper[5016]: I1211 10:55:40.071745 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 11 10:55:40 crc kubenswrapper[5016]: I1211 10:55:40.306546 5016 generic.go:334] "Generic (PLEG): container finished" podID="fd113de0-5a6a-49df-86e1-09b732ec9893" containerID="56ca242e72da8cae9557a994f452d92971afc8299dd507cabe42ead81b910d7f" exitCode=0 Dec 11 10:55:40 crc kubenswrapper[5016]: I1211 10:55:40.306622 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-g76kk-config-g2tn6" event={"ID":"fd113de0-5a6a-49df-86e1-09b732ec9893","Type":"ContainerDied","Data":"56ca242e72da8cae9557a994f452d92971afc8299dd507cabe42ead81b910d7f"} Dec 11 10:55:40 crc kubenswrapper[5016]: I1211 10:55:40.602972 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-g76kk" Dec 11 10:55:40 crc kubenswrapper[5016]: I1211 10:55:40.684560 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.315355 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"0d109b86cb9111230a44d0195873a5a5c42b11cf74a733e7ce1450c6c78ea537"} Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.625624 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.730871 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-log-ovn\") pod \"fd113de0-5a6a-49df-86e1-09b732ec9893\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.730952 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fd113de0-5a6a-49df-86e1-09b732ec9893-additional-scripts\") pod \"fd113de0-5a6a-49df-86e1-09b732ec9893\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.731044 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "fd113de0-5a6a-49df-86e1-09b732ec9893" (UID: "fd113de0-5a6a-49df-86e1-09b732ec9893"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.731096 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fd113de0-5a6a-49df-86e1-09b732ec9893-scripts\") pod \"fd113de0-5a6a-49df-86e1-09b732ec9893\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.731184 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-run-ovn\") pod \"fd113de0-5a6a-49df-86e1-09b732ec9893\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.731281 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "fd113de0-5a6a-49df-86e1-09b732ec9893" (UID: "fd113de0-5a6a-49df-86e1-09b732ec9893"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.731360 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-run\") pod \"fd113de0-5a6a-49df-86e1-09b732ec9893\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.731385 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n62bj\" (UniqueName: \"kubernetes.io/projected/fd113de0-5a6a-49df-86e1-09b732ec9893-kube-api-access-n62bj\") pod \"fd113de0-5a6a-49df-86e1-09b732ec9893\" (UID: \"fd113de0-5a6a-49df-86e1-09b732ec9893\") " Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.731433 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-run" (OuterVolumeSpecName: "var-run") pod "fd113de0-5a6a-49df-86e1-09b732ec9893" (UID: "fd113de0-5a6a-49df-86e1-09b732ec9893"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.731863 5016 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-log-ovn\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.731888 5016 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.731898 5016 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fd113de0-5a6a-49df-86e1-09b732ec9893-var-run\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.732062 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd113de0-5a6a-49df-86e1-09b732ec9893-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "fd113de0-5a6a-49df-86e1-09b732ec9893" (UID: "fd113de0-5a6a-49df-86e1-09b732ec9893"). InnerVolumeSpecName "additional-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.732265 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd113de0-5a6a-49df-86e1-09b732ec9893-scripts" (OuterVolumeSpecName: "scripts") pod "fd113de0-5a6a-49df-86e1-09b732ec9893" (UID: "fd113de0-5a6a-49df-86e1-09b732ec9893"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.738240 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd113de0-5a6a-49df-86e1-09b732ec9893-kube-api-access-n62bj" (OuterVolumeSpecName: "kube-api-access-n62bj") pod "fd113de0-5a6a-49df-86e1-09b732ec9893" (UID: "fd113de0-5a6a-49df-86e1-09b732ec9893"). InnerVolumeSpecName "kube-api-access-n62bj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.833773 5016 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fd113de0-5a6a-49df-86e1-09b732ec9893-additional-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.833808 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fd113de0-5a6a-49df-86e1-09b732ec9893-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:41 crc kubenswrapper[5016]: I1211 10:55:41.833820 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n62bj\" (UniqueName: \"kubernetes.io/projected/fd113de0-5a6a-49df-86e1-09b732ec9893-kube-api-access-n62bj\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:42 crc kubenswrapper[5016]: I1211 10:55:42.335422 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-g76kk-config-g2tn6" event={"ID":"fd113de0-5a6a-49df-86e1-09b732ec9893","Type":"ContainerDied","Data":"8d5da64c815db5f492b48a8f316a82880f20a3ec15a9356aa2b6b239eda2a00b"} Dec 11 10:55:42 crc kubenswrapper[5016]: I1211 10:55:42.335465 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d5da64c815db5f492b48a8f316a82880f20a3ec15a9356aa2b6b239eda2a00b" Dec 11 10:55:42 crc kubenswrapper[5016]: I1211 10:55:42.335525 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-g76kk-config-g2tn6" Dec 11 10:55:42 crc kubenswrapper[5016]: I1211 10:55:42.340169 5016 generic.go:334] "Generic (PLEG): container finished" podID="313107c9-4bb0-49ad-a67b-7f2e4ae09753" containerID="b2f2efa6faa297b8f0bc3ba17f76e2c24b2691d9c6af06345d0d0383aaacc499" exitCode=0 Dec 11 10:55:42 crc kubenswrapper[5016]: I1211 10:55:42.340208 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"313107c9-4bb0-49ad-a67b-7f2e4ae09753","Type":"ContainerDied","Data":"b2f2efa6faa297b8f0bc3ba17f76e2c24b2691d9c6af06345d0d0383aaacc499"} Dec 11 10:55:42 crc kubenswrapper[5016]: I1211 10:55:42.728683 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-g76kk-config-g2tn6"] Dec 11 10:55:42 crc kubenswrapper[5016]: I1211 10:55:42.734561 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-g76kk-config-g2tn6"] Dec 11 10:55:42 crc kubenswrapper[5016]: I1211 10:55:42.932296 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:55:42 crc kubenswrapper[5016]: I1211 10:55:42.932379 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:55:43 crc kubenswrapper[5016]: I1211 10:55:43.354603 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"313107c9-4bb0-49ad-a67b-7f2e4ae09753","Type":"ContainerStarted","Data":"64b5accdc2fd0c92a017d393d7f08770227f2a59966376e256f9e6643bdc204d"} Dec 11 10:55:43 crc kubenswrapper[5016]: I1211 10:55:43.355884 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 11 10:55:43 crc kubenswrapper[5016]: I1211 10:55:43.379247 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371943.475548 podStartE2EDuration="1m33.379228151s" podCreationTimestamp="2025-12-11 10:54:10 +0000 UTC" firstStartedPulling="2025-12-11 10:54:12.383768569 +0000 UTC m=+1169.202328148" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:55:43.377833037 +0000 UTC m=+1260.196392616" watchObservedRunningTime="2025-12-11 10:55:43.379228151 +0000 UTC m=+1260.197787730" Dec 11 10:55:43 crc kubenswrapper[5016]: I1211 10:55:43.490471 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd113de0-5a6a-49df-86e1-09b732ec9893" path="/var/lib/kubelet/pods/fd113de0-5a6a-49df-86e1-09b732ec9893/volumes" Dec 11 10:55:44 crc kubenswrapper[5016]: I1211 10:55:44.365702 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"d08938b1e4cf582548bc6a887c39f6b8707ea94ad85f8176a777fd1528d4f808"} Dec 11 10:55:44 crc kubenswrapper[5016]: I1211 10:55:44.366095 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"8b9ec04e145e32b50dddbf8d4614845d618d91969abafbc813ef3057579788b7"} Dec 11 10:55:45 crc kubenswrapper[5016]: I1211 10:55:45.373955 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"8fdd40c497c0f3db2720ae402d9f196803bbf27eac0207e88f11ea7dffc21ac1"} Dec 11 10:55:45 crc kubenswrapper[5016]: I1211 10:55:45.374003 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"812cb3306fc621325f08fb507197d4d88e178fdf89884529fc995f14866d852b"} Dec 11 10:55:46 crc kubenswrapper[5016]: I1211 10:55:46.386722 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"581a50f8f25fce658bbf1f0f3801d98c2d867baa19f0fc85ffd73604c6c50765"} Dec 11 10:55:47 crc kubenswrapper[5016]: I1211 10:55:47.398072 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"444e94dd30ac880c51b34ebc1e99992225d80c627140d03799f2af105c063680"} Dec 11 10:55:47 crc kubenswrapper[5016]: I1211 10:55:47.398123 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"fc6a6109b805b37fe5d87570aff3a35b2407dba2ab7e9daa3b67f5d278e3d9e0"} Dec 11 10:55:47 crc kubenswrapper[5016]: I1211 10:55:47.398135 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"b40e0acdba35fa0ea2e250ee00aeb1f2a3dbd0b0718552e3c6e2d53fb7cf5439"} Dec 11 10:55:49 crc kubenswrapper[5016]: I1211 10:55:49.421405 5016 generic.go:334] "Generic (PLEG): container finished" podID="b54c94f2-993f-4595-9878-b14557d8bb18" containerID="fa6774202c1e09cb4d25a552c564bacfd6372911d02c77d0ed7c3078238d169b" exitCode=0 Dec 11 10:55:49 crc kubenswrapper[5016]: I1211 10:55:49.421616 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-7jsb2" event={"ID":"b54c94f2-993f-4595-9878-b14557d8bb18","Type":"ContainerDied","Data":"fa6774202c1e09cb4d25a552c564bacfd6372911d02c77d0ed7c3078238d169b"} Dec 11 10:55:49 crc kubenswrapper[5016]: I1211 10:55:49.465584 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"0f889ed8ec6939c6b70fcc235983281cd8b4ce35248822109b1bfe1acb5c8f6f"} Dec 11 10:55:49 crc kubenswrapper[5016]: I1211 10:55:49.465639 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"e0944d910361edfdf04aba426d5afd9089ff5aaee5a470fe257b683eb88bca97"} Dec 11 10:55:49 crc kubenswrapper[5016]: I1211 10:55:49.465665 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"3ebe313076dfead00d9ce8f1d58a56e3f30afd16251bba534d3d90a9f7f46d62"} Dec 11 10:55:50 crc kubenswrapper[5016]: I1211 10:55:50.483914 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"72b558c3a81a0ae57c4453e20fb43e44fe58cef0f538015da2dbed79d91d5cc1"} Dec 11 10:55:50 crc kubenswrapper[5016]: I1211 10:55:50.484260 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"cc184587821912480d49355e2a9795e136d31a2485f36a5495b5e78c993033aa"} Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.006612 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.091242 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-db-sync-config-data\") pod \"b54c94f2-993f-4595-9878-b14557d8bb18\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.091303 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-config-data\") pod \"b54c94f2-993f-4595-9878-b14557d8bb18\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.091358 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-combined-ca-bundle\") pod \"b54c94f2-993f-4595-9878-b14557d8bb18\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.091443 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdhsh\" (UniqueName: \"kubernetes.io/projected/b54c94f2-993f-4595-9878-b14557d8bb18-kube-api-access-tdhsh\") pod \"b54c94f2-993f-4595-9878-b14557d8bb18\" (UID: \"b54c94f2-993f-4595-9878-b14557d8bb18\") " Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.101595 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b54c94f2-993f-4595-9878-b14557d8bb18-kube-api-access-tdhsh" (OuterVolumeSpecName: "kube-api-access-tdhsh") pod "b54c94f2-993f-4595-9878-b14557d8bb18" (UID: "b54c94f2-993f-4595-9878-b14557d8bb18"). InnerVolumeSpecName "kube-api-access-tdhsh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.109161 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b54c94f2-993f-4595-9878-b14557d8bb18" (UID: "b54c94f2-993f-4595-9878-b14557d8bb18"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.138139 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b54c94f2-993f-4595-9878-b14557d8bb18" (UID: "b54c94f2-993f-4595-9878-b14557d8bb18"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.152781 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-config-data" (OuterVolumeSpecName: "config-data") pod "b54c94f2-993f-4595-9878-b14557d8bb18" (UID: "b54c94f2-993f-4595-9878-b14557d8bb18"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.195109 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdhsh\" (UniqueName: \"kubernetes.io/projected/b54c94f2-993f-4595-9878-b14557d8bb18-kube-api-access-tdhsh\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.195632 5016 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.195646 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.195658 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b54c94f2-993f-4595-9878-b14557d8bb18-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.537886 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-7jsb2" event={"ID":"b54c94f2-993f-4595-9878-b14557d8bb18","Type":"ContainerDied","Data":"6272d3331dbfc24fe82f1334bb04b6dbd07c610a15e8b9255cd315560618315b"} Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.537963 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6272d3331dbfc24fe82f1334bb04b6dbd07c610a15e8b9255cd315560618315b" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.538290 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-7jsb2" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.557034 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"527c1201018a6459482bc7fbe474ce3f7c471d7ffbe0026c9fd2dc1d31f265da"} Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.557492 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"a24f6c06-a757-4b4b-9361-e87f07af2ca8","Type":"ContainerStarted","Data":"e663cd18fd3e89092d559db55613d97388cc568df3ae89f33bc2dd0ff611f301"} Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.590996 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.981897321 podStartE2EDuration="45.590973568s" podCreationTimestamp="2025-12-11 10:55:06 +0000 UTC" firstStartedPulling="2025-12-11 10:55:40.70865057 +0000 UTC m=+1257.527210149" lastFinishedPulling="2025-12-11 10:55:48.317726817 +0000 UTC m=+1265.136286396" observedRunningTime="2025-12-11 10:55:51.589929472 +0000 UTC m=+1268.408489071" watchObservedRunningTime="2025-12-11 10:55:51.590973568 +0000 UTC m=+1268.409533147" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.937118 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-pswpw"] Dec 11 10:55:51 crc kubenswrapper[5016]: E1211 10:55:51.937581 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b54c94f2-993f-4595-9878-b14557d8bb18" containerName="glance-db-sync" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.937609 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="b54c94f2-993f-4595-9878-b14557d8bb18" containerName="glance-db-sync" Dec 11 10:55:51 crc kubenswrapper[5016]: E1211 10:55:51.937630 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd113de0-5a6a-49df-86e1-09b732ec9893" containerName="ovn-config" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.937639 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd113de0-5a6a-49df-86e1-09b732ec9893" containerName="ovn-config" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.937834 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="b54c94f2-993f-4595-9878-b14557d8bb18" containerName="glance-db-sync" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.937864 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd113de0-5a6a-49df-86e1-09b732ec9893" containerName="ovn-config" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.942775 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.946127 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Dec 11 10:55:51 crc kubenswrapper[5016]: I1211 10:55:51.955741 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-pswpw"] Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.015717 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-pswpw"] Dec 11 10:55:52 crc kubenswrapper[5016]: E1211 10:55:52.016577 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc dns-swift-storage-0 kube-api-access-zwm7b ovsdbserver-nb ovsdbserver-sb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" podUID="cfc791a2-9b91-493e-95a4-9df77ffbf088" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.016754 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.016822 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwm7b\" (UniqueName: \"kubernetes.io/projected/cfc791a2-9b91-493e-95a4-9df77ffbf088-kube-api-access-zwm7b\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.016843 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.016869 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.016886 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.016956 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-config\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.044775 5016 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/dnsmasq-dns-5f59b8f679-79znm"] Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.053167 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.060705 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-79znm"] Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.119541 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.119630 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwm7b\" (UniqueName: \"kubernetes.io/projected/cfc791a2-9b91-493e-95a4-9df77ffbf088-kube-api-access-zwm7b\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.119660 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.119701 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.119727 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.119999 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-config\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.120771 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.120780 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.122644 5016 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-config\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.123231 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.123416 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.146205 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwm7b\" (UniqueName: \"kubernetes.io/projected/cfc791a2-9b91-493e-95a4-9df77ffbf088-kube-api-access-zwm7b\") pod \"dnsmasq-dns-5c79d794d7-pswpw\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.222170 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-config\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.222521 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.222553 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.222598 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-957k6\" (UniqueName: \"kubernetes.io/projected/61ddbb14-d8b7-4c38-a398-e0d93aba33db-kube-api-access-957k6\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.222623 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 
10:55:52.222789 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm"
Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.291210 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.324926 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm"
Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.325102 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-config\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm"
Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.325139 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm"
Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.325176 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm"
Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.325236 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-957k6\" (UniqueName: \"kubernetes.io/projected/61ddbb14-d8b7-4c38-a398-e0d93aba33db-kube-api-access-957k6\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm"
Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.325269 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm"
Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.326545 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm"
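[Editor's note] The mount entries above for dnsmasq-dns-5f59b8f679-79znm show the pod's full volume set: five kubernetes.io/configmap volumes (config, dns-svc, dns-swift-storage-0, ovsdbserver-nb, ovsdbserver-sb) plus one kubernetes.io/projected service-account token volume (kube-api-access-957k6). A sketch of what that set looks like in client-go types; the ConfigMap object names are assumptions inferred from the volume names, not taken from the operator's actual manifest (requires the k8s.io/api module):

```go
// Illustrative reconstruction of the dnsmasq-dns pod's volumes as they appear
// in the mount log entries above: plain ConfigMap volumes plus a projected
// service-account token volume.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// configMapVolume assumes the backing ConfigMap shares the volume's name.
func configMapVolume(name string) corev1.Volume {
	return corev1.Volume{
		Name: name,
		VolumeSource: corev1.VolumeSource{
			ConfigMap: &corev1.ConfigMapVolumeSource{
				LocalObjectReference: corev1.LocalObjectReference{Name: name},
			},
		},
	}
}

func main() {
	volumes := []corev1.Volume{
		configMapVolume("config"),
		configMapVolume("dns-svc"),
		configMapVolume("dns-swift-storage-0"),
		configMapVolume("ovsdbserver-nb"),
		configMapVolume("ovsdbserver-sb"),
		{
			// kube-api-access-* volumes are projected service-account tokens,
			// which is why the kubelet logs them under kubernetes.io/projected.
			Name: "kube-api-access-957k6",
			VolumeSource: corev1.VolumeSource{
				Projected: &corev1.ProjectedVolumeSource{
					Sources: []corev1.VolumeProjection{
						{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{Path: "token"}},
					},
				},
			},
		},
	}
	for _, v := range volumes {
		fmt.Println(v.Name)
	}
}
```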
\"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.326770 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-config\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.326900 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.326965 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.353029 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-957k6\" (UniqueName: \"kubernetes.io/projected/61ddbb14-d8b7-4c38-a398-e0d93aba33db-kube-api-access-957k6\") pod \"dnsmasq-dns-5f59b8f679-79znm\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.378224 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.598977 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.617775 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-pswpw" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.736585 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-ovsdbserver-nb\") pod \"cfc791a2-9b91-493e-95a4-9df77ffbf088\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.736625 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-config\") pod \"cfc791a2-9b91-493e-95a4-9df77ffbf088\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.736649 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-dns-svc\") pod \"cfc791a2-9b91-493e-95a4-9df77ffbf088\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.736776 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-dns-swift-storage-0\") pod \"cfc791a2-9b91-493e-95a4-9df77ffbf088\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.736807 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-ovsdbserver-sb\") pod \"cfc791a2-9b91-493e-95a4-9df77ffbf088\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.736844 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwm7b\" (UniqueName: \"kubernetes.io/projected/cfc791a2-9b91-493e-95a4-9df77ffbf088-kube-api-access-zwm7b\") pod \"cfc791a2-9b91-493e-95a4-9df77ffbf088\" (UID: \"cfc791a2-9b91-493e-95a4-9df77ffbf088\") " Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.748518 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-config" (OuterVolumeSpecName: "config") pod "cfc791a2-9b91-493e-95a4-9df77ffbf088" (UID: "cfc791a2-9b91-493e-95a4-9df77ffbf088"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.748864 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cfc791a2-9b91-493e-95a4-9df77ffbf088" (UID: "cfc791a2-9b91-493e-95a4-9df77ffbf088"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.749189 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "cfc791a2-9b91-493e-95a4-9df77ffbf088" (UID: "cfc791a2-9b91-493e-95a4-9df77ffbf088"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.749470 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cfc791a2-9b91-493e-95a4-9df77ffbf088" (UID: "cfc791a2-9b91-493e-95a4-9df77ffbf088"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.752117 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cfc791a2-9b91-493e-95a4-9df77ffbf088" (UID: "cfc791a2-9b91-493e-95a4-9df77ffbf088"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.763003 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-sccfv"] Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.764234 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sccfv" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.765137 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfc791a2-9b91-493e-95a4-9df77ffbf088-kube-api-access-zwm7b" (OuterVolumeSpecName: "kube-api-access-zwm7b") pod "cfc791a2-9b91-493e-95a4-9df77ffbf088" (UID: "cfc791a2-9b91-493e-95a4-9df77ffbf088"). InnerVolumeSpecName "kube-api-access-zwm7b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.795998 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-sccfv"] Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.839621 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03c01ad0-d6ea-441d-9c31-ad70526210fe-operator-scripts\") pod \"cinder-db-create-sccfv\" (UID: \"03c01ad0-d6ea-441d-9c31-ad70526210fe\") " pod="openstack/cinder-db-create-sccfv" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.839718 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mzbn\" (UniqueName: \"kubernetes.io/projected/03c01ad0-d6ea-441d-9c31-ad70526210fe-kube-api-access-6mzbn\") pod \"cinder-db-create-sccfv\" (UID: \"03c01ad0-d6ea-441d-9c31-ad70526210fe\") " pod="openstack/cinder-db-create-sccfv" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.839828 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.839844 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.839856 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.839864 5016 
reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.839873 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cfc791a2-9b91-493e-95a4-9df77ffbf088-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.839881 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwm7b\" (UniqueName: \"kubernetes.io/projected/cfc791a2-9b91-493e-95a4-9df77ffbf088-kube-api-access-zwm7b\") on node \"crc\" DevicePath \"\"" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.929531 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-a34d-account-create-update-kc72d"] Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.930629 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-a34d-account-create-update-kc72d" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.934853 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.942852 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03c01ad0-d6ea-441d-9c31-ad70526210fe-operator-scripts\") pod \"cinder-db-create-sccfv\" (UID: \"03c01ad0-d6ea-441d-9c31-ad70526210fe\") " pod="openstack/cinder-db-create-sccfv" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.942952 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mzbn\" (UniqueName: \"kubernetes.io/projected/03c01ad0-d6ea-441d-9c31-ad70526210fe-kube-api-access-6mzbn\") pod \"cinder-db-create-sccfv\" (UID: \"03c01ad0-d6ea-441d-9c31-ad70526210fe\") " pod="openstack/cinder-db-create-sccfv" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.943672 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03c01ad0-d6ea-441d-9c31-ad70526210fe-operator-scripts\") pod \"cinder-db-create-sccfv\" (UID: \"03c01ad0-d6ea-441d-9c31-ad70526210fe\") " pod="openstack/cinder-db-create-sccfv" Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.955024 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-a34d-account-create-update-kc72d"] Dec 11 10:55:52 crc kubenswrapper[5016]: I1211 10:55:52.984802 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mzbn\" (UniqueName: \"kubernetes.io/projected/03c01ad0-d6ea-441d-9c31-ad70526210fe-kube-api-access-6mzbn\") pod \"cinder-db-create-sccfv\" (UID: \"03c01ad0-d6ea-441d-9c31-ad70526210fe\") " pod="openstack/cinder-db-create-sccfv" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.043784 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-v969h"] Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.044160 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37302689-affc-4cf7-87af-b83bb550b54f-operator-scripts\") pod \"cinder-a34d-account-create-update-kc72d\" (UID: \"37302689-affc-4cf7-87af-b83bb550b54f\") " 
pod="openstack/cinder-a34d-account-create-update-kc72d" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.044254 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gs2g6\" (UniqueName: \"kubernetes.io/projected/37302689-affc-4cf7-87af-b83bb550b54f-kube-api-access-gs2g6\") pod \"cinder-a34d-account-create-update-kc72d\" (UID: \"37302689-affc-4cf7-87af-b83bb550b54f\") " pod="openstack/cinder-a34d-account-create-update-kc72d" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.044922 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-v969h" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.072422 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-v969h"] Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.144208 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sccfv" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.145864 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37302689-affc-4cf7-87af-b83bb550b54f-operator-scripts\") pod \"cinder-a34d-account-create-update-kc72d\" (UID: \"37302689-affc-4cf7-87af-b83bb550b54f\") " pod="openstack/cinder-a34d-account-create-update-kc72d" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.145925 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7qts\" (UniqueName: \"kubernetes.io/projected/3617c283-48bc-48fb-8421-b67c914e54ed-kube-api-access-w7qts\") pod \"barbican-db-create-v969h\" (UID: \"3617c283-48bc-48fb-8421-b67c914e54ed\") " pod="openstack/barbican-db-create-v969h" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.145981 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3617c283-48bc-48fb-8421-b67c914e54ed-operator-scripts\") pod \"barbican-db-create-v969h\" (UID: \"3617c283-48bc-48fb-8421-b67c914e54ed\") " pod="openstack/barbican-db-create-v969h" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.146059 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gs2g6\" (UniqueName: \"kubernetes.io/projected/37302689-affc-4cf7-87af-b83bb550b54f-kube-api-access-gs2g6\") pod \"cinder-a34d-account-create-update-kc72d\" (UID: \"37302689-affc-4cf7-87af-b83bb550b54f\") " pod="openstack/cinder-a34d-account-create-update-kc72d" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.147348 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37302689-affc-4cf7-87af-b83bb550b54f-operator-scripts\") pod \"cinder-a34d-account-create-update-kc72d\" (UID: \"37302689-affc-4cf7-87af-b83bb550b54f\") " pod="openstack/cinder-a34d-account-create-update-kc72d" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.147417 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-d0d0-account-create-update-4d9n5"] Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.149684 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-d0d0-account-create-update-4d9n5" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.164543 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d0d0-account-create-update-4d9n5"] Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.171125 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.188720 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gs2g6\" (UniqueName: \"kubernetes.io/projected/37302689-affc-4cf7-87af-b83bb550b54f-kube-api-access-gs2g6\") pod \"cinder-a34d-account-create-update-kc72d\" (UID: \"37302689-affc-4cf7-87af-b83bb550b54f\") " pod="openstack/cinder-a34d-account-create-update-kc72d" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.189423 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-ng4n6"] Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.191236 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-ng4n6" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.200520 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-g8dhv" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.200864 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.203054 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.203302 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.270493 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qds72\" (UniqueName: \"kubernetes.io/projected/f8d7f1ca-06ea-4138-bb6c-56fa633775b9-kube-api-access-qds72\") pod \"barbican-d0d0-account-create-update-4d9n5\" (UID: \"f8d7f1ca-06ea-4138-bb6c-56fa633775b9\") " pod="openstack/barbican-d0d0-account-create-update-4d9n5" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.270894 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8d7f1ca-06ea-4138-bb6c-56fa633775b9-operator-scripts\") pod \"barbican-d0d0-account-create-update-4d9n5\" (UID: \"f8d7f1ca-06ea-4138-bb6c-56fa633775b9\") " pod="openstack/barbican-d0d0-account-create-update-4d9n5" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.271337 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7qts\" (UniqueName: \"kubernetes.io/projected/3617c283-48bc-48fb-8421-b67c914e54ed-kube-api-access-w7qts\") pod \"barbican-db-create-v969h\" (UID: \"3617c283-48bc-48fb-8421-b67c914e54ed\") " pod="openstack/barbican-db-create-v969h" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.271479 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96177636-5cb3-41e8-bf8b-1f34597a85ac-config-data\") pod \"keystone-db-sync-ng4n6\" (UID: \"96177636-5cb3-41e8-bf8b-1f34597a85ac\") " pod="openstack/keystone-db-sync-ng4n6" Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 
10:55:53.271583 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfp4q\" (UniqueName: \"kubernetes.io/projected/96177636-5cb3-41e8-bf8b-1f34597a85ac-kube-api-access-gfp4q\") pod \"keystone-db-sync-ng4n6\" (UID: \"96177636-5cb3-41e8-bf8b-1f34597a85ac\") " pod="openstack/keystone-db-sync-ng4n6"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.275120 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-a34d-account-create-update-kc72d"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.285615 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3617c283-48bc-48fb-8421-b67c914e54ed-operator-scripts\") pod \"barbican-db-create-v969h\" (UID: \"3617c283-48bc-48fb-8421-b67c914e54ed\") " pod="openstack/barbican-db-create-v969h"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.285719 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96177636-5cb3-41e8-bf8b-1f34597a85ac-combined-ca-bundle\") pod \"keystone-db-sync-ng4n6\" (UID: \"96177636-5cb3-41e8-bf8b-1f34597a85ac\") " pod="openstack/keystone-db-sync-ng4n6"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.293672 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3617c283-48bc-48fb-8421-b67c914e54ed-operator-scripts\") pod \"barbican-db-create-v969h\" (UID: \"3617c283-48bc-48fb-8421-b67c914e54ed\") " pod="openstack/barbican-db-create-v969h"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.309988 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-ng4n6"]
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.332986 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7qts\" (UniqueName: \"kubernetes.io/projected/3617c283-48bc-48fb-8421-b67c914e54ed-kube-api-access-w7qts\") pod \"barbican-db-create-v969h\" (UID: \"3617c283-48bc-48fb-8421-b67c914e54ed\") " pod="openstack/barbican-db-create-v969h"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.347894 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-79znm"]
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.425635 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-v969h"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.438884 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfp4q\" (UniqueName: \"kubernetes.io/projected/96177636-5cb3-41e8-bf8b-1f34597a85ac-kube-api-access-gfp4q\") pod \"keystone-db-sync-ng4n6\" (UID: \"96177636-5cb3-41e8-bf8b-1f34597a85ac\") " pod="openstack/keystone-db-sync-ng4n6"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.438978 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96177636-5cb3-41e8-bf8b-1f34597a85ac-config-data\") pod \"keystone-db-sync-ng4n6\" (UID: \"96177636-5cb3-41e8-bf8b-1f34597a85ac\") " pod="openstack/keystone-db-sync-ng4n6"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.440091 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96177636-5cb3-41e8-bf8b-1f34597a85ac-combined-ca-bundle\") pod \"keystone-db-sync-ng4n6\" (UID: \"96177636-5cb3-41e8-bf8b-1f34597a85ac\") " pod="openstack/keystone-db-sync-ng4n6"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.440274 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qds72\" (UniqueName: \"kubernetes.io/projected/f8d7f1ca-06ea-4138-bb6c-56fa633775b9-kube-api-access-qds72\") pod \"barbican-d0d0-account-create-update-4d9n5\" (UID: \"f8d7f1ca-06ea-4138-bb6c-56fa633775b9\") " pod="openstack/barbican-d0d0-account-create-update-4d9n5"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.440307 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8d7f1ca-06ea-4138-bb6c-56fa633775b9-operator-scripts\") pod \"barbican-d0d0-account-create-update-4d9n5\" (UID: \"f8d7f1ca-06ea-4138-bb6c-56fa633775b9\") " pod="openstack/barbican-d0d0-account-create-update-4d9n5"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.443343 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8d7f1ca-06ea-4138-bb6c-56fa633775b9-operator-scripts\") pod \"barbican-d0d0-account-create-update-4d9n5\" (UID: \"f8d7f1ca-06ea-4138-bb6c-56fa633775b9\") " pod="openstack/barbican-d0d0-account-create-update-4d9n5"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.445645 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96177636-5cb3-41e8-bf8b-1f34597a85ac-config-data\") pod \"keystone-db-sync-ng4n6\" (UID: \"96177636-5cb3-41e8-bf8b-1f34597a85ac\") " pod="openstack/keystone-db-sync-ng4n6"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.447073 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96177636-5cb3-41e8-bf8b-1f34597a85ac-combined-ca-bundle\") pod \"keystone-db-sync-ng4n6\" (UID: \"96177636-5cb3-41e8-bf8b-1f34597a85ac\") " pod="openstack/keystone-db-sync-ng4n6"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.447123 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-nlrcw"]
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.448479 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-nlrcw"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.463478 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-nlrcw"]
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.465875 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qds72\" (UniqueName: \"kubernetes.io/projected/f8d7f1ca-06ea-4138-bb6c-56fa633775b9-kube-api-access-qds72\") pod \"barbican-d0d0-account-create-update-4d9n5\" (UID: \"f8d7f1ca-06ea-4138-bb6c-56fa633775b9\") " pod="openstack/barbican-d0d0-account-create-update-4d9n5"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.486934 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfp4q\" (UniqueName: \"kubernetes.io/projected/96177636-5cb3-41e8-bf8b-1f34597a85ac-kube-api-access-gfp4q\") pod \"keystone-db-sync-ng4n6\" (UID: \"96177636-5cb3-41e8-bf8b-1f34597a85ac\") " pod="openstack/keystone-db-sync-ng4n6"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.503071 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8165-account-create-update-tq5mc"]
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.504411 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8165-account-create-update-tq5mc"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.506117 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.513141 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8165-account-create-update-tq5mc"]
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.614661 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d0d0-account-create-update-4d9n5"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.624143 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-ng4n6"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.641079 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" event={"ID":"61ddbb14-d8b7-4c38-a398-e0d93aba33db","Type":"ContainerStarted","Data":"058037da95b5cfa3ee15b9a86dc2dc941ffee76a641968c2c09b0d105e9341c3"}
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.641159 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-pswpw"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.650483 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bk9lf\" (UniqueName: \"kubernetes.io/projected/d6465136-7a18-47c1-bab2-b1a407a75ea2-kube-api-access-bk9lf\") pod \"neutron-db-create-nlrcw\" (UID: \"d6465136-7a18-47c1-bab2-b1a407a75ea2\") " pod="openstack/neutron-db-create-nlrcw"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.652933 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5450c752-02be-4423-9f58-0a840439f5f4-operator-scripts\") pod \"neutron-8165-account-create-update-tq5mc\" (UID: \"5450c752-02be-4423-9f58-0a840439f5f4\") " pod="openstack/neutron-8165-account-create-update-tq5mc"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.653217 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6s2lr\" (UniqueName: \"kubernetes.io/projected/5450c752-02be-4423-9f58-0a840439f5f4-kube-api-access-6s2lr\") pod \"neutron-8165-account-create-update-tq5mc\" (UID: \"5450c752-02be-4423-9f58-0a840439f5f4\") " pod="openstack/neutron-8165-account-create-update-tq5mc"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.653243 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6465136-7a18-47c1-bab2-b1a407a75ea2-operator-scripts\") pod \"neutron-db-create-nlrcw\" (UID: \"d6465136-7a18-47c1-bab2-b1a407a75ea2\") " pod="openstack/neutron-db-create-nlrcw"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.712531 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-pswpw"]
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.720676 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-pswpw"]
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.754810 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bk9lf\" (UniqueName: \"kubernetes.io/projected/d6465136-7a18-47c1-bab2-b1a407a75ea2-kube-api-access-bk9lf\") pod \"neutron-db-create-nlrcw\" (UID: \"d6465136-7a18-47c1-bab2-b1a407a75ea2\") " pod="openstack/neutron-db-create-nlrcw"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.754893 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5450c752-02be-4423-9f58-0a840439f5f4-operator-scripts\") pod \"neutron-8165-account-create-update-tq5mc\" (UID: \"5450c752-02be-4423-9f58-0a840439f5f4\") " pod="openstack/neutron-8165-account-create-update-tq5mc"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.756021 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5450c752-02be-4423-9f58-0a840439f5f4-operator-scripts\") pod \"neutron-8165-account-create-update-tq5mc\" (UID: \"5450c752-02be-4423-9f58-0a840439f5f4\") " pod="openstack/neutron-8165-account-create-update-tq5mc"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.756196 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6s2lr\" (UniqueName: \"kubernetes.io/projected/5450c752-02be-4423-9f58-0a840439f5f4-kube-api-access-6s2lr\") pod \"neutron-8165-account-create-update-tq5mc\" (UID: \"5450c752-02be-4423-9f58-0a840439f5f4\") " pod="openstack/neutron-8165-account-create-update-tq5mc"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.756237 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6465136-7a18-47c1-bab2-b1a407a75ea2-operator-scripts\") pod \"neutron-db-create-nlrcw\" (UID: \"d6465136-7a18-47c1-bab2-b1a407a75ea2\") " pod="openstack/neutron-db-create-nlrcw"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.756899 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6465136-7a18-47c1-bab2-b1a407a75ea2-operator-scripts\") pod \"neutron-db-create-nlrcw\" (UID: \"d6465136-7a18-47c1-bab2-b1a407a75ea2\") " pod="openstack/neutron-db-create-nlrcw"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.797722 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6s2lr\" (UniqueName: \"kubernetes.io/projected/5450c752-02be-4423-9f58-0a840439f5f4-kube-api-access-6s2lr\") pod \"neutron-8165-account-create-update-tq5mc\" (UID: \"5450c752-02be-4423-9f58-0a840439f5f4\") " pod="openstack/neutron-8165-account-create-update-tq5mc"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.799031 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bk9lf\" (UniqueName: \"kubernetes.io/projected/d6465136-7a18-47c1-bab2-b1a407a75ea2-kube-api-access-bk9lf\") pod \"neutron-db-create-nlrcw\" (UID: \"d6465136-7a18-47c1-bab2-b1a407a75ea2\") " pod="openstack/neutron-db-create-nlrcw"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.849733 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-sccfv"]
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.860762 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8165-account-create-update-tq5mc"
Dec 11 10:55:53 crc kubenswrapper[5016]: I1211 10:55:53.957495 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-a34d-account-create-update-kc72d"]
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.053781 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-ng4n6"]
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.077751 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-nlrcw"
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.128644 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-v969h"]
Dec 11 10:55:54 crc kubenswrapper[5016]: W1211 10:55:54.161969 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3617c283_48bc_48fb_8421_b67c914e54ed.slice/crio-ef880049e492c3fcdc0d65fb00cc039c485c9932c172d3f86ac3ab0d956372b9 WatchSource:0}: Error finding container ef880049e492c3fcdc0d65fb00cc039c485c9932c172d3f86ac3ab0d956372b9: Status 404 returned error can't find the container with id ef880049e492c3fcdc0d65fb00cc039c485c9932c172d3f86ac3ab0d956372b9
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.228411 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8165-account-create-update-tq5mc"]
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.320714 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d0d0-account-create-update-4d9n5"]
Dec 11 10:55:54 crc kubenswrapper[5016]: W1211 10:55:54.340746 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8d7f1ca_06ea_4138_bb6c_56fa633775b9.slice/crio-86c0855104a1b9f216e97eed15c6950e5d01ac5f54229fc30aa3d7b2143380d5 WatchSource:0}: Error finding container 86c0855104a1b9f216e97eed15c6950e5d01ac5f54229fc30aa3d7b2143380d5: Status 404 returned error can't find the container with id 86c0855104a1b9f216e97eed15c6950e5d01ac5f54229fc30aa3d7b2143380d5
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.658005 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d0d0-account-create-update-4d9n5" event={"ID":"f8d7f1ca-06ea-4138-bb6c-56fa633775b9","Type":"ContainerStarted","Data":"7bb507ca06f22c64bd69b2e6122981cbc6784ae00b2e0a29bacabc545f15c2f9"}
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.658147 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d0d0-account-create-update-4d9n5" event={"ID":"f8d7f1ca-06ea-4138-bb6c-56fa633775b9","Type":"ContainerStarted","Data":"86c0855104a1b9f216e97eed15c6950e5d01ac5f54229fc30aa3d7b2143380d5"}
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.660134 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-ng4n6" event={"ID":"96177636-5cb3-41e8-bf8b-1f34597a85ac","Type":"ContainerStarted","Data":"77b4262ae7eaf634e4f477de1847f54826c79b10e4b578323bdd0bcd7b306550"}
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.661875 5016 generic.go:334] "Generic (PLEG): container finished" podID="37302689-affc-4cf7-87af-b83bb550b54f" containerID="5636b2ff84eaf306130fc233af14fa56371d34df685f64f43cec59011b843098" exitCode=0
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.661951 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-a34d-account-create-update-kc72d" event={"ID":"37302689-affc-4cf7-87af-b83bb550b54f","Type":"ContainerDied","Data":"5636b2ff84eaf306130fc233af14fa56371d34df685f64f43cec59011b843098"}
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.661981 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-a34d-account-create-update-kc72d" event={"ID":"37302689-affc-4cf7-87af-b83bb550b54f","Type":"ContainerStarted","Data":"754e21229db203333f13c52f9ab22ca5ec0fcb498fb0291626bed6850037445d"}
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.664641 5016 generic.go:334] "Generic (PLEG): container finished" podID="03c01ad0-d6ea-441d-9c31-ad70526210fe" containerID="426d29ba7043089bb47e27086bb0ae362c8aff3274315409d4980ee4491cc493" exitCode=0
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.664685 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sccfv" event={"ID":"03c01ad0-d6ea-441d-9c31-ad70526210fe","Type":"ContainerDied","Data":"426d29ba7043089bb47e27086bb0ae362c8aff3274315409d4980ee4491cc493"}
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.664703 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sccfv" event={"ID":"03c01ad0-d6ea-441d-9c31-ad70526210fe","Type":"ContainerStarted","Data":"24f1ad1933deee9569ed806feeae71b472d1aa45c574d43e723d159d941bf5f2"}
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.677183 5016 generic.go:334] "Generic (PLEG): container finished" podID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" containerID="b9cd0f101da872ceef563416b1dd8a66791cd3d2765ceb9ef8d43c64a9f0181e" exitCode=0
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.678506 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" event={"ID":"61ddbb14-d8b7-4c38-a398-e0d93aba33db","Type":"ContainerDied","Data":"b9cd0f101da872ceef563416b1dd8a66791cd3d2765ceb9ef8d43c64a9f0181e"}
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.685483 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8165-account-create-update-tq5mc" event={"ID":"5450c752-02be-4423-9f58-0a840439f5f4","Type":"ContainerStarted","Data":"bf47c557f33afb7ef18f3e16124eedaa525c3b7b1090d21b8f9c8d7b2e98a427"}
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.685625 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8165-account-create-update-tq5mc" event={"ID":"5450c752-02be-4423-9f58-0a840439f5f4","Type":"ContainerStarted","Data":"521268c1698ca3282fbd4a2df13868a742d49d6cf326b6120e67dfaa41b38937"}
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.690989 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-nlrcw"]
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.695326 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-v969h" event={"ID":"3617c283-48bc-48fb-8421-b67c914e54ed","Type":"ContainerStarted","Data":"18b2b1300a89f2085d38838c70a3361cf9342d758fda36b5bfcc42aad0c37219"}
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.695372 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-v969h" event={"ID":"3617c283-48bc-48fb-8421-b67c914e54ed","Type":"ContainerStarted","Data":"ef880049e492c3fcdc0d65fb00cc039c485c9932c172d3f86ac3ab0d956372b9"}
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.695796 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-d0d0-account-create-update-4d9n5" podStartSLOduration=1.6957673789999999 podStartE2EDuration="1.695767379s" podCreationTimestamp="2025-12-11 10:55:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:55:54.676861056 +0000 UTC m=+1271.495420645" watchObservedRunningTime="2025-12-11 10:55:54.695767379 +0000 UTC m=+1271.514326958"
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.767637 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-v969h" podStartSLOduration=1.767612616 podStartE2EDuration="1.767612616s" podCreationTimestamp="2025-12-11 10:55:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:55:54.750318163 +0000 UTC m=+1271.568877742" watchObservedRunningTime="2025-12-11 10:55:54.767612616 +0000 UTC m=+1271.586172215"
Dec 11 10:55:54 crc kubenswrapper[5016]: I1211 10:55:54.792337 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-8165-account-create-update-tq5mc" podStartSLOduration=1.79231154 podStartE2EDuration="1.79231154s" podCreationTimestamp="2025-12-11 10:55:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:55:54.772243509 +0000 UTC m=+1271.590803108" watchObservedRunningTime="2025-12-11 10:55:54.79231154 +0000 UTC m=+1271.610871129"
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.489187 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfc791a2-9b91-493e-95a4-9df77ffbf088" path="/var/lib/kubelet/pods/cfc791a2-9b91-493e-95a4-9df77ffbf088/volumes"
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.716522 5016 generic.go:334] "Generic (PLEG): container finished" podID="f8d7f1ca-06ea-4138-bb6c-56fa633775b9" containerID="7bb507ca06f22c64bd69b2e6122981cbc6784ae00b2e0a29bacabc545f15c2f9" exitCode=0
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.720923 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d0d0-account-create-update-4d9n5" event={"ID":"f8d7f1ca-06ea-4138-bb6c-56fa633775b9","Type":"ContainerDied","Data":"7bb507ca06f22c64bd69b2e6122981cbc6784ae00b2e0a29bacabc545f15c2f9"}
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.726146 5016 generic.go:334] "Generic (PLEG): container finished" podID="d6465136-7a18-47c1-bab2-b1a407a75ea2" containerID="9f9918fe3ab9d959f65a3b9db625c833dbf785de2f412b5e077f278554022107" exitCode=0
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.726589 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-nlrcw" event={"ID":"d6465136-7a18-47c1-bab2-b1a407a75ea2","Type":"ContainerDied","Data":"9f9918fe3ab9d959f65a3b9db625c833dbf785de2f412b5e077f278554022107"}
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.726726 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-nlrcw" event={"ID":"d6465136-7a18-47c1-bab2-b1a407a75ea2","Type":"ContainerStarted","Data":"13a25fc9ae26761c7afead4b1a07e70f1dd79f2a75f4d770d06de982253d478e"}
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.747125 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" event={"ID":"61ddbb14-d8b7-4c38-a398-e0d93aba33db","Type":"ContainerStarted","Data":"241091772bb80f5ffba1a61edf4c5ed427a11caf621f26b77372775e4dc50920"}
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.748197 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f59b8f679-79znm"
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.751713 5016 generic.go:334] "Generic (PLEG): container finished" podID="5450c752-02be-4423-9f58-0a840439f5f4" containerID="bf47c557f33afb7ef18f3e16124eedaa525c3b7b1090d21b8f9c8d7b2e98a427" exitCode=0
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.751920 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8165-account-create-update-tq5mc" event={"ID":"5450c752-02be-4423-9f58-0a840439f5f4","Type":"ContainerDied","Data":"bf47c557f33afb7ef18f3e16124eedaa525c3b7b1090d21b8f9c8d7b2e98a427"}
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.763988 5016 generic.go:334] "Generic (PLEG): container finished" podID="3617c283-48bc-48fb-8421-b67c914e54ed" containerID="18b2b1300a89f2085d38838c70a3361cf9342d758fda36b5bfcc42aad0c37219" exitCode=0
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.764086 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-v969h" event={"ID":"3617c283-48bc-48fb-8421-b67c914e54ed","Type":"ContainerDied","Data":"18b2b1300a89f2085d38838c70a3361cf9342d758fda36b5bfcc42aad0c37219"}
Dec 11 10:55:55 crc kubenswrapper[5016]: I1211 10:55:55.821296 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" podStartSLOduration=3.821273235 podStartE2EDuration="3.821273235s" podCreationTimestamp="2025-12-11 10:55:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:55:55.797296977 +0000 UTC m=+1272.615856566" watchObservedRunningTime="2025-12-11 10:55:55.821273235 +0000 UTC m=+1272.639832834"
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.282646 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-a34d-account-create-update-kc72d"
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.292963 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sccfv"
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.416536 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gs2g6\" (UniqueName: \"kubernetes.io/projected/37302689-affc-4cf7-87af-b83bb550b54f-kube-api-access-gs2g6\") pod \"37302689-affc-4cf7-87af-b83bb550b54f\" (UID: \"37302689-affc-4cf7-87af-b83bb550b54f\") "
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.416723 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03c01ad0-d6ea-441d-9c31-ad70526210fe-operator-scripts\") pod \"03c01ad0-d6ea-441d-9c31-ad70526210fe\" (UID: \"03c01ad0-d6ea-441d-9c31-ad70526210fe\") "
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.416769 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mzbn\" (UniqueName: \"kubernetes.io/projected/03c01ad0-d6ea-441d-9c31-ad70526210fe-kube-api-access-6mzbn\") pod \"03c01ad0-d6ea-441d-9c31-ad70526210fe\" (UID: \"03c01ad0-d6ea-441d-9c31-ad70526210fe\") "
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.416825 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37302689-affc-4cf7-87af-b83bb550b54f-operator-scripts\") pod \"37302689-affc-4cf7-87af-b83bb550b54f\" (UID: \"37302689-affc-4cf7-87af-b83bb550b54f\") "
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.418246 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37302689-affc-4cf7-87af-b83bb550b54f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "37302689-affc-4cf7-87af-b83bb550b54f" (UID: "37302689-affc-4cf7-87af-b83bb550b54f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.418394 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03c01ad0-d6ea-441d-9c31-ad70526210fe-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "03c01ad0-d6ea-441d-9c31-ad70526210fe" (UID: "03c01ad0-d6ea-441d-9c31-ad70526210fe"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.424489 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37302689-affc-4cf7-87af-b83bb550b54f-kube-api-access-gs2g6" (OuterVolumeSpecName: "kube-api-access-gs2g6") pod "37302689-affc-4cf7-87af-b83bb550b54f" (UID: "37302689-affc-4cf7-87af-b83bb550b54f"). InnerVolumeSpecName "kube-api-access-gs2g6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.425462 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03c01ad0-d6ea-441d-9c31-ad70526210fe-kube-api-access-6mzbn" (OuterVolumeSpecName: "kube-api-access-6mzbn") pod "03c01ad0-d6ea-441d-9c31-ad70526210fe" (UID: "03c01ad0-d6ea-441d-9c31-ad70526210fe"). InnerVolumeSpecName "kube-api-access-6mzbn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.519096 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03c01ad0-d6ea-441d-9c31-ad70526210fe-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.519130 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mzbn\" (UniqueName: \"kubernetes.io/projected/03c01ad0-d6ea-441d-9c31-ad70526210fe-kube-api-access-6mzbn\") on node \"crc\" DevicePath \"\""
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.519145 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37302689-affc-4cf7-87af-b83bb550b54f-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.519156 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gs2g6\" (UniqueName: \"kubernetes.io/projected/37302689-affc-4cf7-87af-b83bb550b54f-kube-api-access-gs2g6\") on node \"crc\" DevicePath \"\""
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.777448 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sccfv"
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.777464 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sccfv" event={"ID":"03c01ad0-d6ea-441d-9c31-ad70526210fe","Type":"ContainerDied","Data":"24f1ad1933deee9569ed806feeae71b472d1aa45c574d43e723d159d941bf5f2"}
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.777994 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24f1ad1933deee9569ed806feeae71b472d1aa45c574d43e723d159d941bf5f2"
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.780355 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-a34d-account-create-update-kc72d"
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.781431 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-a34d-account-create-update-kc72d" event={"ID":"37302689-affc-4cf7-87af-b83bb550b54f","Type":"ContainerDied","Data":"754e21229db203333f13c52f9ab22ca5ec0fcb498fb0291626bed6850037445d"}
Dec 11 10:55:56 crc kubenswrapper[5016]: I1211 10:55:56.781491 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="754e21229db203333f13c52f9ab22ca5ec0fcb498fb0291626bed6850037445d"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.736795 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8165-account-create-update-tq5mc"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.746717 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-nlrcw"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.751821 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-v969h"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.777603 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d0d0-account-create-update-4d9n5"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.812797 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6s2lr\" (UniqueName: \"kubernetes.io/projected/5450c752-02be-4423-9f58-0a840439f5f4-kube-api-access-6s2lr\") pod \"5450c752-02be-4423-9f58-0a840439f5f4\" (UID: \"5450c752-02be-4423-9f58-0a840439f5f4\") "
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.813055 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5450c752-02be-4423-9f58-0a840439f5f4-operator-scripts\") pod \"5450c752-02be-4423-9f58-0a840439f5f4\" (UID: \"5450c752-02be-4423-9f58-0a840439f5f4\") "
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.814105 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5450c752-02be-4423-9f58-0a840439f5f4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5450c752-02be-4423-9f58-0a840439f5f4" (UID: "5450c752-02be-4423-9f58-0a840439f5f4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.823775 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-v969h" event={"ID":"3617c283-48bc-48fb-8421-b67c914e54ed","Type":"ContainerDied","Data":"ef880049e492c3fcdc0d65fb00cc039c485c9932c172d3f86ac3ab0d956372b9"}
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.823827 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef880049e492c3fcdc0d65fb00cc039c485c9932c172d3f86ac3ab0d956372b9"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.823888 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-v969h"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.828532 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d0d0-account-create-update-4d9n5"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.828529 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d0d0-account-create-update-4d9n5" event={"ID":"f8d7f1ca-06ea-4138-bb6c-56fa633775b9","Type":"ContainerDied","Data":"86c0855104a1b9f216e97eed15c6950e5d01ac5f54229fc30aa3d7b2143380d5"}
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.828993 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86c0855104a1b9f216e97eed15c6950e5d01ac5f54229fc30aa3d7b2143380d5"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.829468 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5450c752-02be-4423-9f58-0a840439f5f4-kube-api-access-6s2lr" (OuterVolumeSpecName: "kube-api-access-6s2lr") pod "5450c752-02be-4423-9f58-0a840439f5f4" (UID: "5450c752-02be-4423-9f58-0a840439f5f4"). InnerVolumeSpecName "kube-api-access-6s2lr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.832741 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-nlrcw" event={"ID":"d6465136-7a18-47c1-bab2-b1a407a75ea2","Type":"ContainerDied","Data":"13a25fc9ae26761c7afead4b1a07e70f1dd79f2a75f4d770d06de982253d478e"}
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.832802 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13a25fc9ae26761c7afead4b1a07e70f1dd79f2a75f4d770d06de982253d478e"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.832892 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-nlrcw"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.835767 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8165-account-create-update-tq5mc" event={"ID":"5450c752-02be-4423-9f58-0a840439f5f4","Type":"ContainerDied","Data":"521268c1698ca3282fbd4a2df13868a742d49d6cf326b6120e67dfaa41b38937"}
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.835805 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="521268c1698ca3282fbd4a2df13868a742d49d6cf326b6120e67dfaa41b38937"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.835887 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8165-account-create-update-tq5mc"
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.915367 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8d7f1ca-06ea-4138-bb6c-56fa633775b9-operator-scripts\") pod \"f8d7f1ca-06ea-4138-bb6c-56fa633775b9\" (UID: \"f8d7f1ca-06ea-4138-bb6c-56fa633775b9\") "
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.915684 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7qts\" (UniqueName: \"kubernetes.io/projected/3617c283-48bc-48fb-8421-b67c914e54ed-kube-api-access-w7qts\") pod \"3617c283-48bc-48fb-8421-b67c914e54ed\" (UID: \"3617c283-48bc-48fb-8421-b67c914e54ed\") "
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.915752 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3617c283-48bc-48fb-8421-b67c914e54ed-operator-scripts\") pod \"3617c283-48bc-48fb-8421-b67c914e54ed\" (UID: \"3617c283-48bc-48fb-8421-b67c914e54ed\") "
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.915852 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qds72\" (UniqueName: \"kubernetes.io/projected/f8d7f1ca-06ea-4138-bb6c-56fa633775b9-kube-api-access-qds72\") pod \"f8d7f1ca-06ea-4138-bb6c-56fa633775b9\" (UID: \"f8d7f1ca-06ea-4138-bb6c-56fa633775b9\") "
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.915873 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6465136-7a18-47c1-bab2-b1a407a75ea2-operator-scripts\") pod \"d6465136-7a18-47c1-bab2-b1a407a75ea2\" (UID: \"d6465136-7a18-47c1-bab2-b1a407a75ea2\") "
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.915899 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bk9lf\" (UniqueName: \"kubernetes.io/projected/d6465136-7a18-47c1-bab2-b1a407a75ea2-kube-api-access-bk9lf\") pod \"d6465136-7a18-47c1-bab2-b1a407a75ea2\" (UID: \"d6465136-7a18-47c1-bab2-b1a407a75ea2\") "
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.915970 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8d7f1ca-06ea-4138-bb6c-56fa633775b9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f8d7f1ca-06ea-4138-bb6c-56fa633775b9" (UID: "f8d7f1ca-06ea-4138-bb6c-56fa633775b9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.916424 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8d7f1ca-06ea-4138-bb6c-56fa633775b9-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.916448 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5450c752-02be-4423-9f58-0a840439f5f4-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.916459 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6s2lr\" (UniqueName: \"kubernetes.io/projected/5450c752-02be-4423-9f58-0a840439f5f4-kube-api-access-6s2lr\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.916549 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3617c283-48bc-48fb-8421-b67c914e54ed-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3617c283-48bc-48fb-8421-b67c914e54ed" (UID: "3617c283-48bc-48fb-8421-b67c914e54ed"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.916636 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d6465136-7a18-47c1-bab2-b1a407a75ea2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d6465136-7a18-47c1-bab2-b1a407a75ea2" (UID: "d6465136-7a18-47c1-bab2-b1a407a75ea2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.919886 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8d7f1ca-06ea-4138-bb6c-56fa633775b9-kube-api-access-qds72" (OuterVolumeSpecName: "kube-api-access-qds72") pod "f8d7f1ca-06ea-4138-bb6c-56fa633775b9" (UID: "f8d7f1ca-06ea-4138-bb6c-56fa633775b9"). InnerVolumeSpecName "kube-api-access-qds72". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.920862 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3617c283-48bc-48fb-8421-b67c914e54ed-kube-api-access-w7qts" (OuterVolumeSpecName: "kube-api-access-w7qts") pod "3617c283-48bc-48fb-8421-b67c914e54ed" (UID: "3617c283-48bc-48fb-8421-b67c914e54ed"). InnerVolumeSpecName "kube-api-access-w7qts". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:56:00 crc kubenswrapper[5016]: I1211 10:56:00.921525 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6465136-7a18-47c1-bab2-b1a407a75ea2-kube-api-access-bk9lf" (OuterVolumeSpecName: "kube-api-access-bk9lf") pod "d6465136-7a18-47c1-bab2-b1a407a75ea2" (UID: "d6465136-7a18-47c1-bab2-b1a407a75ea2"). InnerVolumeSpecName "kube-api-access-bk9lf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:56:01 crc kubenswrapper[5016]: I1211 10:56:01.018359 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7qts\" (UniqueName: \"kubernetes.io/projected/3617c283-48bc-48fb-8421-b67c914e54ed-kube-api-access-w7qts\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:01 crc kubenswrapper[5016]: I1211 10:56:01.018430 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3617c283-48bc-48fb-8421-b67c914e54ed-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:01 crc kubenswrapper[5016]: I1211 10:56:01.018444 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qds72\" (UniqueName: \"kubernetes.io/projected/f8d7f1ca-06ea-4138-bb6c-56fa633775b9-kube-api-access-qds72\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:01 crc kubenswrapper[5016]: I1211 10:56:01.018459 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6465136-7a18-47c1-bab2-b1a407a75ea2-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:01 crc kubenswrapper[5016]: I1211 10:56:01.018474 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bk9lf\" (UniqueName: \"kubernetes.io/projected/d6465136-7a18-47c1-bab2-b1a407a75ea2-kube-api-access-bk9lf\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:01 crc kubenswrapper[5016]: I1211 10:56:01.543278 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Dec 11 10:56:01 crc kubenswrapper[5016]: I1211 10:56:01.850894 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-ng4n6" event={"ID":"96177636-5cb3-41e8-bf8b-1f34597a85ac","Type":"ContainerStarted","Data":"ad32aec390c81c640a9f6fdcedcdb8c610dd6fbc2e7f2c1df1df14262d574d39"}
Dec 11 10:56:02 crc kubenswrapper[5016]: I1211 10:56:02.381984 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5f59b8f679-79znm"
Dec 11 10:56:02 crc kubenswrapper[5016]: I1211 10:56:02.403522 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-ng4n6" podStartSLOduration=2.653908162 podStartE2EDuration="9.40349962s" podCreationTimestamp="2025-12-11 10:55:53 +0000 UTC" firstStartedPulling="2025-12-11 10:55:54.063049825 +0000 UTC m=+1270.881609404" lastFinishedPulling="2025-12-11 10:56:00.812641283 +0000 UTC m=+1277.631200862" observedRunningTime="2025-12-11 10:56:01.876917232 +0000 UTC m=+1278.695476821" watchObservedRunningTime="2025-12-11 10:56:02.40349962 +0000 UTC m=+1279.222059199"
Dec 11 10:56:02 crc kubenswrapper[5016]: I1211 10:56:02.439855 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-6h9sw"]
Dec 11 10:56:02 crc kubenswrapper[5016]: I1211 10:56:02.440143 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" podUID="e22ba3c0-863e-417e-bbfe-6ca4426f9936" containerName="dnsmasq-dns" containerID="cri-o://2386f5dcccc094129c96b672321197b1f7bad5c4bff95188cca67d1db6f06ae7" gracePeriod=10
Dec 11 10:56:02 crc kubenswrapper[5016]: I1211 10:56:02.874051 5016 generic.go:334] "Generic (PLEG): container finished" podID="e22ba3c0-863e-417e-bbfe-6ca4426f9936" containerID="2386f5dcccc094129c96b672321197b1f7bad5c4bff95188cca67d1db6f06ae7" exitCode=0
Dec 11 10:56:02 crc kubenswrapper[5016]: I1211 10:56:02.874387 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" event={"ID":"e22ba3c0-863e-417e-bbfe-6ca4426f9936","Type":"ContainerDied","Data":"2386f5dcccc094129c96b672321197b1f7bad5c4bff95188cca67d1db6f06ae7"}
Dec 11 10:56:02 crc kubenswrapper[5016]: I1211 10:56:02.943729 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw"
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.059974 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-ovsdbserver-sb\") pod \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") "
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.060401 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-config\") pod \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") "
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.060459 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-dns-svc\") pod \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") "
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.060481 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-ovsdbserver-nb\") pod \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") "
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.060524 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ltcsz\" (UniqueName: \"kubernetes.io/projected/e22ba3c0-863e-417e-bbfe-6ca4426f9936-kube-api-access-ltcsz\") pod \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\" (UID: \"e22ba3c0-863e-417e-bbfe-6ca4426f9936\") "
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.084081 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e22ba3c0-863e-417e-bbfe-6ca4426f9936-kube-api-access-ltcsz" (OuterVolumeSpecName: "kube-api-access-ltcsz") pod "e22ba3c0-863e-417e-bbfe-6ca4426f9936" (UID: "e22ba3c0-863e-417e-bbfe-6ca4426f9936"). InnerVolumeSpecName "kube-api-access-ltcsz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.119897 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e22ba3c0-863e-417e-bbfe-6ca4426f9936" (UID: "e22ba3c0-863e-417e-bbfe-6ca4426f9936"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.122476 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-config" (OuterVolumeSpecName: "config") pod "e22ba3c0-863e-417e-bbfe-6ca4426f9936" (UID: "e22ba3c0-863e-417e-bbfe-6ca4426f9936"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.125234 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e22ba3c0-863e-417e-bbfe-6ca4426f9936" (UID: "e22ba3c0-863e-417e-bbfe-6ca4426f9936"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.128865 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e22ba3c0-863e-417e-bbfe-6ca4426f9936" (UID: "e22ba3c0-863e-417e-bbfe-6ca4426f9936"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.162712 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.163036 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-config\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.163118 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.163226 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e22ba3c0-863e-417e-bbfe-6ca4426f9936-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.163306 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ltcsz\" (UniqueName: \"kubernetes.io/projected/e22ba3c0-863e-417e-bbfe-6ca4426f9936-kube-api-access-ltcsz\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.884419 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw" event={"ID":"e22ba3c0-863e-417e-bbfe-6ca4426f9936","Type":"ContainerDied","Data":"beef41358df5d09134c7f99ae2cfa18e8f8191c8734d5f5f83935a24d9a70c18"}
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.884884 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-6h9sw"
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.885454 5016 scope.go:117] "RemoveContainer" containerID="2386f5dcccc094129c96b672321197b1f7bad5c4bff95188cca67d1db6f06ae7"
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.912165 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-6h9sw"]
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.916657 5016 scope.go:117] "RemoveContainer" containerID="fb840a84a6c291b99bbbaeca9aa0d24e2a9096473bae8d54b271b3ca743c72cc"
Dec 11 10:56:03 crc kubenswrapper[5016]: I1211 10:56:03.919230 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-6h9sw"]
Dec 11 10:56:05 crc kubenswrapper[5016]: I1211 10:56:05.484877 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e22ba3c0-863e-417e-bbfe-6ca4426f9936" path="/var/lib/kubelet/pods/e22ba3c0-863e-417e-bbfe-6ca4426f9936/volumes"
Dec 11 10:56:05 crc kubenswrapper[5016]: I1211 10:56:05.906236 5016 generic.go:334] "Generic (PLEG): container finished" podID="96177636-5cb3-41e8-bf8b-1f34597a85ac" containerID="ad32aec390c81c640a9f6fdcedcdb8c610dd6fbc2e7f2c1df1df14262d574d39" exitCode=0
Dec 11 10:56:05 crc kubenswrapper[5016]: I1211 10:56:05.906404 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-ng4n6" event={"ID":"96177636-5cb3-41e8-bf8b-1f34597a85ac","Type":"ContainerDied","Data":"ad32aec390c81c640a9f6fdcedcdb8c610dd6fbc2e7f2c1df1df14262d574d39"}
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.282748 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-ng4n6"
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.432328 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96177636-5cb3-41e8-bf8b-1f34597a85ac-config-data\") pod \"96177636-5cb3-41e8-bf8b-1f34597a85ac\" (UID: \"96177636-5cb3-41e8-bf8b-1f34597a85ac\") "
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.432436 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96177636-5cb3-41e8-bf8b-1f34597a85ac-combined-ca-bundle\") pod \"96177636-5cb3-41e8-bf8b-1f34597a85ac\" (UID: \"96177636-5cb3-41e8-bf8b-1f34597a85ac\") "
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.432550 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfp4q\" (UniqueName: \"kubernetes.io/projected/96177636-5cb3-41e8-bf8b-1f34597a85ac-kube-api-access-gfp4q\") pod \"96177636-5cb3-41e8-bf8b-1f34597a85ac\" (UID: \"96177636-5cb3-41e8-bf8b-1f34597a85ac\") "
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.441406 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96177636-5cb3-41e8-bf8b-1f34597a85ac-kube-api-access-gfp4q" (OuterVolumeSpecName: "kube-api-access-gfp4q") pod "96177636-5cb3-41e8-bf8b-1f34597a85ac" (UID: "96177636-5cb3-41e8-bf8b-1f34597a85ac"). InnerVolumeSpecName "kube-api-access-gfp4q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.473092 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96177636-5cb3-41e8-bf8b-1f34597a85ac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "96177636-5cb3-41e8-bf8b-1f34597a85ac" (UID: "96177636-5cb3-41e8-bf8b-1f34597a85ac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.490060 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96177636-5cb3-41e8-bf8b-1f34597a85ac-config-data" (OuterVolumeSpecName: "config-data") pod "96177636-5cb3-41e8-bf8b-1f34597a85ac" (UID: "96177636-5cb3-41e8-bf8b-1f34597a85ac"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.534860 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96177636-5cb3-41e8-bf8b-1f34597a85ac-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.534899 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfp4q\" (UniqueName: \"kubernetes.io/projected/96177636-5cb3-41e8-bf8b-1f34597a85ac-kube-api-access-gfp4q\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.534912 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96177636-5cb3-41e8-bf8b-1f34597a85ac-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.928970 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-ng4n6" event={"ID":"96177636-5cb3-41e8-bf8b-1f34597a85ac","Type":"ContainerDied","Data":"77b4262ae7eaf634e4f477de1847f54826c79b10e4b578323bdd0bcd7b306550"}
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.929012 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="77b4262ae7eaf634e4f477de1847f54826c79b10e4b578323bdd0bcd7b306550"
Dec 11 10:56:07 crc kubenswrapper[5016]: I1211 10:56:07.929056 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-ng4n6"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225082 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-dpms2"]
Dec 11 10:56:08 crc kubenswrapper[5016]: E1211 10:56:08.225585 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37302689-affc-4cf7-87af-b83bb550b54f" containerName="mariadb-account-create-update"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225603 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="37302689-affc-4cf7-87af-b83bb550b54f" containerName="mariadb-account-create-update"
Dec 11 10:56:08 crc kubenswrapper[5016]: E1211 10:56:08.225613 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03c01ad0-d6ea-441d-9c31-ad70526210fe" containerName="mariadb-database-create"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225619 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="03c01ad0-d6ea-441d-9c31-ad70526210fe" containerName="mariadb-database-create"
Dec 11 10:56:08 crc kubenswrapper[5016]: E1211 10:56:08.225632 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6465136-7a18-47c1-bab2-b1a407a75ea2" containerName="mariadb-database-create"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225638 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6465136-7a18-47c1-bab2-b1a407a75ea2" containerName="mariadb-database-create"
Dec 11 10:56:08 crc kubenswrapper[5016]: E1211 10:56:08.225649 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96177636-5cb3-41e8-bf8b-1f34597a85ac" containerName="keystone-db-sync"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225656 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="96177636-5cb3-41e8-bf8b-1f34597a85ac" containerName="keystone-db-sync"
Dec 11 10:56:08 crc kubenswrapper[5016]: E1211 10:56:08.225665 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3617c283-48bc-48fb-8421-b67c914e54ed" containerName="mariadb-database-create"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225671 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="3617c283-48bc-48fb-8421-b67c914e54ed" containerName="mariadb-database-create"
Dec 11 10:56:08 crc kubenswrapper[5016]: E1211 10:56:08.225685 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e22ba3c0-863e-417e-bbfe-6ca4426f9936" containerName="dnsmasq-dns"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225692 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e22ba3c0-863e-417e-bbfe-6ca4426f9936" containerName="dnsmasq-dns"
Dec 11 10:56:08 crc kubenswrapper[5016]: E1211 10:56:08.225701 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5450c752-02be-4423-9f58-0a840439f5f4" containerName="mariadb-account-create-update"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225707 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="5450c752-02be-4423-9f58-0a840439f5f4" containerName="mariadb-account-create-update"
Dec 11 10:56:08 crc kubenswrapper[5016]: E1211 10:56:08.225720 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e22ba3c0-863e-417e-bbfe-6ca4426f9936" containerName="init"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225726 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e22ba3c0-863e-417e-bbfe-6ca4426f9936" containerName="init"
Dec 11 10:56:08 crc kubenswrapper[5016]: E1211 10:56:08.225737 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8d7f1ca-06ea-4138-bb6c-56fa633775b9" containerName="mariadb-account-create-update"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225744 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8d7f1ca-06ea-4138-bb6c-56fa633775b9" containerName="mariadb-account-create-update"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225892 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="03c01ad0-d6ea-441d-9c31-ad70526210fe" containerName="mariadb-database-create"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225902 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="5450c752-02be-4423-9f58-0a840439f5f4" containerName="mariadb-account-create-update"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225918 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="96177636-5cb3-41e8-bf8b-1f34597a85ac" containerName="keystone-db-sync"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225927 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8d7f1ca-06ea-4138-bb6c-56fa633775b9" containerName="mariadb-account-create-update"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225954 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="e22ba3c0-863e-417e-bbfe-6ca4426f9936" containerName="dnsmasq-dns"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225969 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6465136-7a18-47c1-bab2-b1a407a75ea2" containerName="mariadb-database-create"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225983 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="37302689-affc-4cf7-87af-b83bb550b54f" containerName="mariadb-account-create-update"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.225996 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="3617c283-48bc-48fb-8421-b67c914e54ed" containerName="mariadb-database-create"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.227039 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-dpms2"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.233391 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-mxdc5"]
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.235337 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-mxdc5"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.240813 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.241227 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.241337 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.241367 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-g8dhv"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.241430 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.247158 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-dpms2"]
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.253145 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-mxdc5"]
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.349918 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-combined-ca-bundle\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.350302 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-scripts\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.350335 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.350362 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-fernet-keys\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.350408 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.350479 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-config\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.350500 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.350526 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckqxc\" (UniqueName: \"kubernetes.io/projected/508d3a8c-8510-4123-b8e6-5e4208992fd8-kube-api-access-ckqxc\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.350562 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-credential-keys\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.350591 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.350614 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-config-data\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.350657 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dztm\" (UniqueName: \"kubernetes.io/projected/52d4105e-ccb5-43e4-bdc3-416d58571c0a-kube-api-access-9dztm\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5"
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.421129 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5f7dbcfb75-ncjth"]
Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.422952 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.429253 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-wh6s6" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.429541 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.429914 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.430148 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.445638 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5f7dbcfb75-ncjth"] Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.452255 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-credential-keys\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.452317 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.452378 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-config-data\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.452406 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dztm\" (UniqueName: \"kubernetes.io/projected/52d4105e-ccb5-43e4-bdc3-416d58571c0a-kube-api-access-9dztm\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.452517 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-combined-ca-bundle\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.452554 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-scripts\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.452580 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 
10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.452607 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-fernet-keys\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.452658 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.452741 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-config\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.452764 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.452788 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckqxc\" (UniqueName: \"kubernetes.io/projected/508d3a8c-8510-4123-b8e6-5e4208992fd8-kube-api-access-ckqxc\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.454194 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.454292 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-config\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.455598 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.455686 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.462365 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-combined-ca-bundle\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.462851 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-credential-keys\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.467970 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-scripts\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.468249 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.473429 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-config-data\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.475040 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-fernet-keys\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.496708 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckqxc\" (UniqueName: \"kubernetes.io/projected/508d3a8c-8510-4123-b8e6-5e4208992fd8-kube-api-access-ckqxc\") pod \"dnsmasq-dns-bbf5cc879-dpms2\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.530302 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dztm\" (UniqueName: \"kubernetes.io/projected/52d4105e-ccb5-43e4-bdc3-416d58571c0a-kube-api-access-9dztm\") pod \"keystone-bootstrap-mxdc5\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.554165 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-scripts\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.554259 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-config-data\") pod 
\"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.554284 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6bc5\" (UniqueName: \"kubernetes.io/projected/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-kube-api-access-q6bc5\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.554415 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-horizon-secret-key\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.554472 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-logs\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.568629 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.568798 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-dgkjf"] Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.580255 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.586050 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.587725 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-6mwkk" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.588032 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.590814 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.631662 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-jnv4g"] Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.632958 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.647398 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.647613 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-j2cht" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.647727 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.657623 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d31ff49c-2515-4b93-b3b8-e776e3190ab7-etc-machine-id\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.657664 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-scripts\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.657687 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-scripts\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.657705 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-config-data\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.657721 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6bc5\" (UniqueName: \"kubernetes.io/projected/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-kube-api-access-q6bc5\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.657782 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-horizon-secret-key\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.657812 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-combined-ca-bundle\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.657832 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-db-sync-config-data\") pod 
\"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.657850 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-logs\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.657877 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bdh2\" (UniqueName: \"kubernetes.io/projected/d31ff49c-2515-4b93-b3b8-e776e3190ab7-kube-api-access-6bdh2\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.657898 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-config-data\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.659019 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-scripts\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.660676 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-config-data\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.660911 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-logs\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.663571 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.670382 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-horizon-secret-key\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.682051 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.682967 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-dgkjf"] Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.684196 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.685204 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.690258 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6bc5\" (UniqueName: \"kubernetes.io/projected/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-kube-api-access-q6bc5\") pod \"horizon-5f7dbcfb75-ncjth\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.693921 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-jnv4g"] Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.723204 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.754598 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759155 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49507f32-2b67-4dc4-a968-a691ca6c8454-log-httpd\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759251 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759297 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf05bc17-f548-45a7-a1c1-eb32b12957d2-combined-ca-bundle\") pod \"neutron-db-sync-jnv4g\" (UID: \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\") " pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759337 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-config-data\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759367 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-combined-ca-bundle\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759388 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-db-sync-config-data\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759423 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgsp6\" (UniqueName: \"kubernetes.io/projected/cf05bc17-f548-45a7-a1c1-eb32b12957d2-kube-api-access-xgsp6\") pod \"neutron-db-sync-jnv4g\" (UID: \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\") " pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759468 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bdh2\" (UniqueName: \"kubernetes.io/projected/d31ff49c-2515-4b93-b3b8-e776e3190ab7-kube-api-access-6bdh2\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759496 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-scripts\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759517 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-config-data\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759563 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759622 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cf05bc17-f548-45a7-a1c1-eb32b12957d2-config\") pod \"neutron-db-sync-jnv4g\" (UID: \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\") " pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759652 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d31ff49c-2515-4b93-b3b8-e776e3190ab7-etc-machine-id\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.759679 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-scripts\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.763456 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49507f32-2b67-4dc4-a968-a691ca6c8454-run-httpd\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " 
pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.763531 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsn2p\" (UniqueName: \"kubernetes.io/projected/49507f32-2b67-4dc4-a968-a691ca6c8454-kube-api-access-tsn2p\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.764456 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d31ff49c-2515-4b93-b3b8-e776e3190ab7-etc-machine-id\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.768343 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-db-sync-config-data\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.781635 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-combined-ca-bundle\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.783129 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-config-data\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.786829 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-scripts\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.823513 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bdh2\" (UniqueName: \"kubernetes.io/projected/d31ff49c-2515-4b93-b3b8-e776e3190ab7-kube-api-access-6bdh2\") pod \"cinder-db-sync-dgkjf\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.856494 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-44rj9"] Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.857910 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.861428 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-wqc95" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.861928 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.873093 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.873171 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf05bc17-f548-45a7-a1c1-eb32b12957d2-combined-ca-bundle\") pod \"neutron-db-sync-jnv4g\" (UID: \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\") " pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.873215 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-config-data\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.873283 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgsp6\" (UniqueName: \"kubernetes.io/projected/cf05bc17-f548-45a7-a1c1-eb32b12957d2-kube-api-access-xgsp6\") pod \"neutron-db-sync-jnv4g\" (UID: \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\") " pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.873340 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-scripts\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.873396 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.873447 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cf05bc17-f548-45a7-a1c1-eb32b12957d2-config\") pod \"neutron-db-sync-jnv4g\" (UID: \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\") " pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.873497 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49507f32-2b67-4dc4-a968-a691ca6c8454-run-httpd\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.873525 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsn2p\" (UniqueName: 
\"kubernetes.io/projected/49507f32-2b67-4dc4-a968-a691ca6c8454-kube-api-access-tsn2p\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.873554 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49507f32-2b67-4dc4-a968-a691ca6c8454-log-httpd\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.874159 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49507f32-2b67-4dc4-a968-a691ca6c8454-log-httpd\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.874709 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49507f32-2b67-4dc4-a968-a691ca6c8454-run-httpd\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.882294 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-config-data\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.889442 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-56459f8665-dvsnq"] Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.891466 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.895161 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/cf05bc17-f548-45a7-a1c1-eb32b12957d2-config\") pod \"neutron-db-sync-jnv4g\" (UID: \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\") " pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.902456 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.905364 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-scripts\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.914102 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf05bc17-f548-45a7-a1c1-eb32b12957d2-combined-ca-bundle\") pod \"neutron-db-sync-jnv4g\" (UID: \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\") " pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.928029 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgsp6\" (UniqueName: \"kubernetes.io/projected/cf05bc17-f548-45a7-a1c1-eb32b12957d2-kube-api-access-xgsp6\") pod \"neutron-db-sync-jnv4g\" (UID: \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\") " pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.929527 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.929822 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsn2p\" (UniqueName: \"kubernetes.io/projected/49507f32-2b67-4dc4-a968-a691ca6c8454-kube-api-access-tsn2p\") pod \"ceilometer-0\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " pod="openstack/ceilometer-0" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.934004 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-44rj9"] Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.959220 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-dpms2"] Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.999074 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hh5jz\" (UniqueName: \"kubernetes.io/projected/91645737-c66c-42cb-8c87-1d7bded844e1-kube-api-access-hh5jz\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.999768 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/ae211270-86fb-4d5e-a028-49d60d9a6685-db-sync-config-data\") pod \"barbican-db-sync-44rj9\" (UID: \"ae211270-86fb-4d5e-a028-49d60d9a6685\") " pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:08 crc kubenswrapper[5016]: I1211 10:56:08.999881 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91645737-c66c-42cb-8c87-1d7bded844e1-logs\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.000036 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/91645737-c66c-42cb-8c87-1d7bded844e1-config-data\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.000155 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/91645737-c66c-42cb-8c87-1d7bded844e1-horizon-secret-key\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.000392 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfjhw\" (UniqueName: \"kubernetes.io/projected/ae211270-86fb-4d5e-a028-49d60d9a6685-kube-api-access-rfjhw\") pod \"barbican-db-sync-44rj9\" (UID: \"ae211270-86fb-4d5e-a028-49d60d9a6685\") " pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.000473 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/91645737-c66c-42cb-8c87-1d7bded844e1-scripts\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.003101 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae211270-86fb-4d5e-a028-49d60d9a6685-combined-ca-bundle\") pod \"barbican-db-sync-44rj9\" (UID: \"ae211270-86fb-4d5e-a028-49d60d9a6685\") " pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.006368 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-56459f8665-dvsnq"] Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.056466 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.069121 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.071258 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.077933 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-wgjtw" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.078483 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.088345 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.099811 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.099918 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.100277 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-42gmc"] Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.102117 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.105242 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae211270-86fb-4d5e-a028-49d60d9a6685-combined-ca-bundle\") pod \"barbican-db-sync-44rj9\" (UID: \"ae211270-86fb-4d5e-a028-49d60d9a6685\") " pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.105330 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hh5jz\" (UniqueName: \"kubernetes.io/projected/91645737-c66c-42cb-8c87-1d7bded844e1-kube-api-access-hh5jz\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.105358 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ae211270-86fb-4d5e-a028-49d60d9a6685-db-sync-config-data\") pod \"barbican-db-sync-44rj9\" (UID: \"ae211270-86fb-4d5e-a028-49d60d9a6685\") " pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.105397 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91645737-c66c-42cb-8c87-1d7bded844e1-logs\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.105420 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/91645737-c66c-42cb-8c87-1d7bded844e1-config-data\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.105475 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/91645737-c66c-42cb-8c87-1d7bded844e1-horizon-secret-key\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " 
pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.105560 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfjhw\" (UniqueName: \"kubernetes.io/projected/ae211270-86fb-4d5e-a028-49d60d9a6685-kube-api-access-rfjhw\") pod \"barbican-db-sync-44rj9\" (UID: \"ae211270-86fb-4d5e-a028-49d60d9a6685\") " pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.105598 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/91645737-c66c-42cb-8c87-1d7bded844e1-scripts\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.106134 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91645737-c66c-42cb-8c87-1d7bded844e1-logs\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.106408 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/91645737-c66c-42cb-8c87-1d7bded844e1-scripts\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.107957 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/91645737-c66c-42cb-8c87-1d7bded844e1-config-data\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.113808 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-vmt79"] Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.132636 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.118094 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/91645737-c66c-42cb-8c87-1d7bded844e1-horizon-secret-key\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.130771 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae211270-86fb-4d5e-a028-49d60d9a6685-combined-ca-bundle\") pod \"barbican-db-sync-44rj9\" (UID: \"ae211270-86fb-4d5e-a028-49d60d9a6685\") " pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.135789 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ae211270-86fb-4d5e-a028-49d60d9a6685-db-sync-config-data\") pod \"barbican-db-sync-44rj9\" (UID: \"ae211270-86fb-4d5e-a028-49d60d9a6685\") " pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.137209 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfjhw\" (UniqueName: \"kubernetes.io/projected/ae211270-86fb-4d5e-a028-49d60d9a6685-kube-api-access-rfjhw\") pod \"barbican-db-sync-44rj9\" (UID: \"ae211270-86fb-4d5e-a028-49d60d9a6685\") " pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.137710 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.138043 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.138167 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-8k4lc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.140503 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.144501 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hh5jz\" (UniqueName: \"kubernetes.io/projected/91645737-c66c-42cb-8c87-1d7bded844e1-kube-api-access-hh5jz\") pod \"horizon-56459f8665-dvsnq\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.166407 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-42gmc"] Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.185003 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vmt79"] Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.196804 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.207325 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hd4l\" (UniqueName: \"kubernetes.io/projected/b32cb27a-82a5-4839-b8e9-0197513e6579-kube-api-access-6hd4l\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.207376 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.207407 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.207447 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4b58628-6bc5-4fab-b806-9c1f615c006c-logs\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.207481 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b32cb27a-82a5-4839-b8e9-0197513e6579-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.207499 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbrjl\" (UniqueName: \"kubernetes.io/projected/22451284-6148-4113-a7f5-7c7009092dbe-kube-api-access-gbrjl\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.207892 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-scripts\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.207917 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-combined-ca-bundle\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.207957 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-config-data\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.207984 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b32cb27a-82a5-4839-b8e9-0197513e6579-logs\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.207999 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.208019 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-config\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.208046 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.208062 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.208086 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.208101 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-config-data\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.208119 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-scripts\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.208158 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.208178 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdjwd\" (UniqueName: \"kubernetes.io/projected/b4b58628-6bc5-4fab-b806-9c1f615c006c-kube-api-access-cdjwd\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.233146 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.243452 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.309737 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b32cb27a-82a5-4839-b8e9-0197513e6579-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.309798 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbrjl\" (UniqueName: \"kubernetes.io/projected/22451284-6148-4113-a7f5-7c7009092dbe-kube-api-access-gbrjl\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.309883 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-scripts\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.309914 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-combined-ca-bundle\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.309974 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-config-data\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310017 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b32cb27a-82a5-4839-b8e9-0197513e6579-logs\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310040 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310068 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-config\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310106 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310124 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310158 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310182 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-config-data\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310218 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-scripts\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310238 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: 
\"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310256 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdjwd\" (UniqueName: \"kubernetes.io/projected/b4b58628-6bc5-4fab-b806-9c1f615c006c-kube-api-access-cdjwd\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310278 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hd4l\" (UniqueName: \"kubernetes.io/projected/b32cb27a-82a5-4839-b8e9-0197513e6579-kube-api-access-6hd4l\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310298 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310323 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310361 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4b58628-6bc5-4fab-b806-9c1f615c006c-logs\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310380 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b32cb27a-82a5-4839-b8e9-0197513e6579-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310767 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4b58628-6bc5-4fab-b806-9c1f615c006c-logs\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.310842 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.311413 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: 
I1211 10:56:09.313421 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-config\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.314819 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b32cb27a-82a5-4839-b8e9-0197513e6579-logs\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.315096 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.317014 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.317640 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-combined-ca-bundle\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.317708 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.318734 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-config-data\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.319332 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-scripts\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.325430 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-scripts\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.326379 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-config-data\") pod 
\"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.327612 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.329842 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.329894 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hd4l\" (UniqueName: \"kubernetes.io/projected/b32cb27a-82a5-4839-b8e9-0197513e6579-kube-api-access-6hd4l\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.335419 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdjwd\" (UniqueName: \"kubernetes.io/projected/b4b58628-6bc5-4fab-b806-9c1f615c006c-kube-api-access-cdjwd\") pod \"placement-db-sync-vmt79\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.335482 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbrjl\" (UniqueName: \"kubernetes.io/projected/22451284-6148-4113-a7f5-7c7009092dbe-kube-api-access-gbrjl\") pod \"dnsmasq-dns-56df8fb6b7-42gmc\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.349711 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.425634 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.446486 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.475761 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.500970 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-dpms2"] Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.554366 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.558025 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.565519 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.565838 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.594888 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.617580 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.617643 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4m4b7\" (UniqueName: \"kubernetes.io/projected/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-kube-api-access-4m4b7\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.617704 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.617772 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-logs\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.617801 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.617836 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.617876 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.617927 5016 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.620994 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-mxdc5"] Dec 11 10:56:09 crc kubenswrapper[5016]: W1211 10:56:09.670093 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52d4105e_ccb5_43e4_bdc3_416d58571c0a.slice/crio-e738c21c56e2d760047588d62f9e4b7363ff8b596769b234d8067227dcab0005 WatchSource:0}: Error finding container e738c21c56e2d760047588d62f9e4b7363ff8b596769b234d8067227dcab0005: Status 404 returned error can't find the container with id e738c21c56e2d760047588d62f9e4b7363ff8b596769b234d8067227dcab0005 Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.719648 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.719706 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4m4b7\" (UniqueName: \"kubernetes.io/projected/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-kube-api-access-4m4b7\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.719737 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.719792 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-logs\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.719812 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.719837 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.719865 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.719902 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.722722 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-logs\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.723266 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.725153 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.730802 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.735749 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.739740 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.747080 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.761770 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4m4b7\" (UniqueName: \"kubernetes.io/projected/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-kube-api-access-4m4b7\") pod \"glance-default-internal-api-0\" (UID: 
\"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.786276 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.872534 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5f7dbcfb75-ncjth"] Dec 11 10:56:09 crc kubenswrapper[5016]: W1211 10:56:09.877895 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd374fc64_fa7d_476d_8ab5_5d2f4bcd26d1.slice/crio-b64ff7965e401ce86fbd205a8da7d06758d4b8d9637f52703187dd7041c1abe5 WatchSource:0}: Error finding container b64ff7965e401ce86fbd205a8da7d06758d4b8d9637f52703187dd7041c1abe5: Status 404 returned error can't find the container with id b64ff7965e401ce86fbd205a8da7d06758d4b8d9637f52703187dd7041c1abe5 Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.902954 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-dgkjf"] Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.984033 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" event={"ID":"508d3a8c-8510-4123-b8e6-5e4208992fd8","Type":"ContainerStarted","Data":"600c25eedc6b29f2a5979fe302137f47dbffc97e4e33844a9a14dabb4321a902"} Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.984253 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" podUID="508d3a8c-8510-4123-b8e6-5e4208992fd8" containerName="init" containerID="cri-o://4bf4d636a6c6949b6f2bbf6cfcaa7575216db8f5e9107175ba39c974db12967c" gracePeriod=10 Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.990038 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mxdc5" event={"ID":"52d4105e-ccb5-43e4-bdc3-416d58571c0a","Type":"ContainerStarted","Data":"c13ef5cb6f5b82c13b0c2c2d0484802be6a440acefd3d7d7b1e9a00981f1453c"} Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.990069 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mxdc5" event={"ID":"52d4105e-ccb5-43e4-bdc3-416d58571c0a","Type":"ContainerStarted","Data":"e738c21c56e2d760047588d62f9e4b7363ff8b596769b234d8067227dcab0005"} Dec 11 10:56:09 crc kubenswrapper[5016]: I1211 10:56:09.995378 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f7dbcfb75-ncjth" event={"ID":"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1","Type":"ContainerStarted","Data":"b64ff7965e401ce86fbd205a8da7d06758d4b8d9637f52703187dd7041c1abe5"} Dec 11 10:56:10 crc kubenswrapper[5016]: I1211 10:56:10.000459 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dgkjf" event={"ID":"d31ff49c-2515-4b93-b3b8-e776e3190ab7","Type":"ContainerStarted","Data":"0752d0f4d8f884f3d957eeeec24f5bd075c2eba2887b057a0cfadade2658cc36"} Dec 11 10:56:10 crc kubenswrapper[5016]: I1211 10:56:10.041440 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:10 crc kubenswrapper[5016]: I1211 10:56:10.063912 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-mxdc5" podStartSLOduration=2.063889693 podStartE2EDuration="2.063889693s" podCreationTimestamp="2025-12-11 10:56:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:10.040883529 +0000 UTC m=+1286.859443128" watchObservedRunningTime="2025-12-11 10:56:10.063889693 +0000 UTC m=+1286.882449282" Dec 11 10:56:10 crc kubenswrapper[5016]: I1211 10:56:10.261675 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-56459f8665-dvsnq"] Dec 11 10:56:10 crc kubenswrapper[5016]: I1211 10:56:10.293532 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-jnv4g"] Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:10.316521 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:10.349005 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-44rj9"] Dec 11 10:56:11 crc kubenswrapper[5016]: W1211 10:56:10.375328 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae211270_86fb_4d5e_a028_49d60d9a6685.slice/crio-9fb64f12f09e27aba6ec46839d20a1bab1cc7d75cb22617a6541b450e320d13b WatchSource:0}: Error finding container 9fb64f12f09e27aba6ec46839d20a1bab1cc7d75cb22617a6541b450e320d13b: Status 404 returned error can't find the container with id 9fb64f12f09e27aba6ec46839d20a1bab1cc7d75cb22617a6541b450e320d13b Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:10.605058 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-42gmc"] Dec 11 10:56:11 crc kubenswrapper[5016]: W1211 10:56:10.643274 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod22451284_6148_4113_a7f5_7c7009092dbe.slice/crio-e80d8cedd7ec2cbc886616b8a63ef190cf3a108b37f660075ce85424b3181506 WatchSource:0}: Error finding container e80d8cedd7ec2cbc886616b8a63ef190cf3a108b37f660075ce85424b3181506: Status 404 returned error can't find the container with id e80d8cedd7ec2cbc886616b8a63ef190cf3a108b37f660075ce85424b3181506 Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:10.701805 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vmt79"] Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:10.862959 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:56:11 crc kubenswrapper[5016]: W1211 10:56:10.879283 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2bd6befe_c1c8_42d5_9f8d_093ec14a6ae0.slice/crio-69e9c53f09dfeca68cd6909ca2dd9fdd176f25a7502a21140e03cb25c3633572 WatchSource:0}: Error finding container 69e9c53f09dfeca68cd6909ca2dd9fdd176f25a7502a21140e03cb25c3633572: Status 404 returned error can't find the container with id 69e9c53f09dfeca68cd6909ca2dd9fdd176f25a7502a21140e03cb25c3633572 Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:10.922584 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.029863 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56459f8665-dvsnq" event={"ID":"91645737-c66c-42cb-8c87-1d7bded844e1","Type":"ContainerStarted","Data":"3e6a5d7e8b57a2496fad144b7e1d142e1b5a5322bd10b7f89c0dcaccaa365afd"} Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.036735 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vmt79" event={"ID":"b4b58628-6bc5-4fab-b806-9c1f615c006c","Type":"ContainerStarted","Data":"0c412c4a6b0588e77d2d09a790a5c6abbed4c393526292ed27ec1a606f6c324d"} Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.041569 5016 generic.go:334] "Generic (PLEG): container finished" podID="508d3a8c-8510-4123-b8e6-5e4208992fd8" containerID="4bf4d636a6c6949b6f2bbf6cfcaa7575216db8f5e9107175ba39c974db12967c" exitCode=0 Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.041648 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" event={"ID":"508d3a8c-8510-4123-b8e6-5e4208992fd8","Type":"ContainerDied","Data":"4bf4d636a6c6949b6f2bbf6cfcaa7575216db8f5e9107175ba39c974db12967c"} Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.041683 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" event={"ID":"508d3a8c-8510-4123-b8e6-5e4208992fd8","Type":"ContainerDied","Data":"600c25eedc6b29f2a5979fe302137f47dbffc97e4e33844a9a14dabb4321a902"} Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.041705 5016 scope.go:117] "RemoveContainer" containerID="4bf4d636a6c6949b6f2bbf6cfcaa7575216db8f5e9107175ba39c974db12967c" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.041878 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-dpms2" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.046608 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0","Type":"ContainerStarted","Data":"69e9c53f09dfeca68cd6909ca2dd9fdd176f25a7502a21140e03cb25c3633572"} Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.049125 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49507f32-2b67-4dc4-a968-a691ca6c8454","Type":"ContainerStarted","Data":"619d93f807fb092aef2d07dcb8fd2927296312918b4e543b60616008235be231"} Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.052434 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" event={"ID":"22451284-6148-4113-a7f5-7c7009092dbe","Type":"ContainerStarted","Data":"e80d8cedd7ec2cbc886616b8a63ef190cf3a108b37f660075ce85424b3181506"} Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.057124 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-config\") pod \"508d3a8c-8510-4123-b8e6-5e4208992fd8\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.057249 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-ovsdbserver-nb\") pod \"508d3a8c-8510-4123-b8e6-5e4208992fd8\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.057313 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-dns-swift-storage-0\") pod \"508d3a8c-8510-4123-b8e6-5e4208992fd8\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.057457 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckqxc\" (UniqueName: \"kubernetes.io/projected/508d3a8c-8510-4123-b8e6-5e4208992fd8-kube-api-access-ckqxc\") pod \"508d3a8c-8510-4123-b8e6-5e4208992fd8\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.057565 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-dns-svc\") pod \"508d3a8c-8510-4123-b8e6-5e4208992fd8\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.057640 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-ovsdbserver-sb\") pod \"508d3a8c-8510-4123-b8e6-5e4208992fd8\" (UID: \"508d3a8c-8510-4123-b8e6-5e4208992fd8\") " Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.064137 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-jnv4g" event={"ID":"cf05bc17-f548-45a7-a1c1-eb32b12957d2","Type":"ContainerStarted","Data":"8c7e176f190fa0f3cd9adb64d746b62b13612906e538492fe864cdac5e6662f0"} Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.070079 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-db-sync-44rj9" event={"ID":"ae211270-86fb-4d5e-a028-49d60d9a6685","Type":"ContainerStarted","Data":"9fb64f12f09e27aba6ec46839d20a1bab1cc7d75cb22617a6541b450e320d13b"} Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.073309 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/508d3a8c-8510-4123-b8e6-5e4208992fd8-kube-api-access-ckqxc" (OuterVolumeSpecName: "kube-api-access-ckqxc") pod "508d3a8c-8510-4123-b8e6-5e4208992fd8" (UID: "508d3a8c-8510-4123-b8e6-5e4208992fd8"). InnerVolumeSpecName "kube-api-access-ckqxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.095510 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "508d3a8c-8510-4123-b8e6-5e4208992fd8" (UID: "508d3a8c-8510-4123-b8e6-5e4208992fd8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.101023 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "508d3a8c-8510-4123-b8e6-5e4208992fd8" (UID: "508d3a8c-8510-4123-b8e6-5e4208992fd8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.105375 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "508d3a8c-8510-4123-b8e6-5e4208992fd8" (UID: "508d3a8c-8510-4123-b8e6-5e4208992fd8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.110003 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-jnv4g" podStartSLOduration=3.109979996 podStartE2EDuration="3.109979996s" podCreationTimestamp="2025-12-11 10:56:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:11.106712996 +0000 UTC m=+1287.925272575" watchObservedRunningTime="2025-12-11 10:56:11.109979996 +0000 UTC m=+1287.928539585" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.125532 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-config" (OuterVolumeSpecName: "config") pod "508d3a8c-8510-4123-b8e6-5e4208992fd8" (UID: "508d3a8c-8510-4123-b8e6-5e4208992fd8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.125661 5016 scope.go:117] "RemoveContainer" containerID="4bf4d636a6c6949b6f2bbf6cfcaa7575216db8f5e9107175ba39c974db12967c" Dec 11 10:56:11 crc kubenswrapper[5016]: E1211 10:56:11.127424 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bf4d636a6c6949b6f2bbf6cfcaa7575216db8f5e9107175ba39c974db12967c\": container with ID starting with 4bf4d636a6c6949b6f2bbf6cfcaa7575216db8f5e9107175ba39c974db12967c not found: ID does not exist" containerID="4bf4d636a6c6949b6f2bbf6cfcaa7575216db8f5e9107175ba39c974db12967c" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.127473 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bf4d636a6c6949b6f2bbf6cfcaa7575216db8f5e9107175ba39c974db12967c"} err="failed to get container status \"4bf4d636a6c6949b6f2bbf6cfcaa7575216db8f5e9107175ba39c974db12967c\": rpc error: code = NotFound desc = could not find container \"4bf4d636a6c6949b6f2bbf6cfcaa7575216db8f5e9107175ba39c974db12967c\": container with ID starting with 4bf4d636a6c6949b6f2bbf6cfcaa7575216db8f5e9107175ba39c974db12967c not found: ID does not exist" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.132457 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "508d3a8c-8510-4123-b8e6-5e4208992fd8" (UID: "508d3a8c-8510-4123-b8e6-5e4208992fd8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.161752 5016 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.161785 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckqxc\" (UniqueName: \"kubernetes.io/projected/508d3a8c-8510-4123-b8e6-5e4208992fd8-kube-api-access-ckqxc\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.161796 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.161805 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.161815 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.161826 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/508d3a8c-8510-4123-b8e6-5e4208992fd8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.324272 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.370566 5016 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-56459f8665-dvsnq"] Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.392122 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5778bcf8bf-cpvlx"] Dec 11 10:56:11 crc kubenswrapper[5016]: E1211 10:56:11.392666 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="508d3a8c-8510-4123-b8e6-5e4208992fd8" containerName="init" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.392680 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="508d3a8c-8510-4123-b8e6-5e4208992fd8" containerName="init" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.392896 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="508d3a8c-8510-4123-b8e6-5e4208992fd8" containerName="init" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.394177 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.453129 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5778bcf8bf-cpvlx"] Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.529898 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.533897 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-dpms2"] Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.555269 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-dpms2"] Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.576734 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/188faef9-9759-4921-ba09-08b7a8f84854-horizon-secret-key\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.576855 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/188faef9-9759-4921-ba09-08b7a8f84854-scripts\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.576922 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dwm6\" (UniqueName: \"kubernetes.io/projected/188faef9-9759-4921-ba09-08b7a8f84854-kube-api-access-8dwm6\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.577012 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/188faef9-9759-4921-ba09-08b7a8f84854-config-data\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.577051 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/188faef9-9759-4921-ba09-08b7a8f84854-logs\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: 
\"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.671454 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.680884 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/188faef9-9759-4921-ba09-08b7a8f84854-logs\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.680993 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/188faef9-9759-4921-ba09-08b7a8f84854-horizon-secret-key\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.681064 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/188faef9-9759-4921-ba09-08b7a8f84854-scripts\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.681116 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dwm6\" (UniqueName: \"kubernetes.io/projected/188faef9-9759-4921-ba09-08b7a8f84854-kube-api-access-8dwm6\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.681181 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/188faef9-9759-4921-ba09-08b7a8f84854-config-data\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.682206 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/188faef9-9759-4921-ba09-08b7a8f84854-logs\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.683154 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/188faef9-9759-4921-ba09-08b7a8f84854-scripts\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.683688 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/188faef9-9759-4921-ba09-08b7a8f84854-config-data\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.694081 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/188faef9-9759-4921-ba09-08b7a8f84854-horizon-secret-key\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " 
pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.705571 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dwm6\" (UniqueName: \"kubernetes.io/projected/188faef9-9759-4921-ba09-08b7a8f84854-kube-api-access-8dwm6\") pod \"horizon-5778bcf8bf-cpvlx\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.754049 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:11 crc kubenswrapper[5016]: I1211 10:56:11.773482 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:56:12 crc kubenswrapper[5016]: I1211 10:56:12.191609 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-jnv4g" event={"ID":"cf05bc17-f548-45a7-a1c1-eb32b12957d2","Type":"ContainerStarted","Data":"cb6cc762b8d5740c91ba7612b7c34ba2f706e69d6383afe3c9fdfc6e22c5abc8"} Dec 11 10:56:12 crc kubenswrapper[5016]: I1211 10:56:12.232668 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b32cb27a-82a5-4839-b8e9-0197513e6579","Type":"ContainerStarted","Data":"a7108eac6dc3d3292c38940cf6f92d7f194db21f95da5b9c959530d7bd761995"} Dec 11 10:56:12 crc kubenswrapper[5016]: I1211 10:56:12.290815 5016 generic.go:334] "Generic (PLEG): container finished" podID="22451284-6148-4113-a7f5-7c7009092dbe" containerID="6f639f5c0e6d4f6573048450c5dc2c37d9568e3a0b53d57de3129f76c042ffd7" exitCode=0 Dec 11 10:56:12 crc kubenswrapper[5016]: I1211 10:56:12.290896 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" event={"ID":"22451284-6148-4113-a7f5-7c7009092dbe","Type":"ContainerDied","Data":"6f639f5c0e6d4f6573048450c5dc2c37d9568e3a0b53d57de3129f76c042ffd7"} Dec 11 10:56:12 crc kubenswrapper[5016]: I1211 10:56:12.428707 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5778bcf8bf-cpvlx"] Dec 11 10:56:12 crc kubenswrapper[5016]: I1211 10:56:12.932656 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:56:12 crc kubenswrapper[5016]: I1211 10:56:12.933121 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:56:12 crc kubenswrapper[5016]: I1211 10:56:12.933187 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 10:56:12 crc kubenswrapper[5016]: I1211 10:56:12.934073 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"49f5883716361ecf20e37d0a33857b58813542483a33785fbd7c2c019dd8b594"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 10:56:12 crc kubenswrapper[5016]: I1211 
10:56:12.934135 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://49f5883716361ecf20e37d0a33857b58813542483a33785fbd7c2c019dd8b594" gracePeriod=600 Dec 11 10:56:13 crc kubenswrapper[5016]: I1211 10:56:13.310549 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5778bcf8bf-cpvlx" event={"ID":"188faef9-9759-4921-ba09-08b7a8f84854","Type":"ContainerStarted","Data":"f090e344b3181599a2dcd3a168a312c6e0504b04610ce1367d229b55a10b9059"} Dec 11 10:56:13 crc kubenswrapper[5016]: I1211 10:56:13.314936 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0","Type":"ContainerStarted","Data":"5d94318ad3b1b98d6da92156cc934838dc103ee7d64e4f81eab12f87ffc53619"} Dec 11 10:56:13 crc kubenswrapper[5016]: I1211 10:56:13.318550 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" event={"ID":"22451284-6148-4113-a7f5-7c7009092dbe","Type":"ContainerStarted","Data":"c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249"} Dec 11 10:56:13 crc kubenswrapper[5016]: I1211 10:56:13.318708 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:13 crc kubenswrapper[5016]: I1211 10:56:13.344923 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" podStartSLOduration=5.344895953 podStartE2EDuration="5.344895953s" podCreationTimestamp="2025-12-11 10:56:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:13.340592887 +0000 UTC m=+1290.159152486" watchObservedRunningTime="2025-12-11 10:56:13.344895953 +0000 UTC m=+1290.163455542" Dec 11 10:56:13 crc kubenswrapper[5016]: I1211 10:56:13.493687 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="508d3a8c-8510-4123-b8e6-5e4208992fd8" path="/var/lib/kubelet/pods/508d3a8c-8510-4123-b8e6-5e4208992fd8/volumes" Dec 11 10:56:14 crc kubenswrapper[5016]: I1211 10:56:14.352981 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0","Type":"ContainerStarted","Data":"3d7049fb9648bac1817b5653c5746b369b4a5f166ddb639ed3b1eb13bd3f837f"} Dec 11 10:56:14 crc kubenswrapper[5016]: I1211 10:56:14.355264 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" containerName="glance-log" containerID="cri-o://5d94318ad3b1b98d6da92156cc934838dc103ee7d64e4f81eab12f87ffc53619" gracePeriod=30 Dec 11 10:56:14 crc kubenswrapper[5016]: I1211 10:56:14.355564 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" containerName="glance-httpd" containerID="cri-o://3d7049fb9648bac1817b5653c5746b369b4a5f166ddb639ed3b1eb13bd3f837f" gracePeriod=30 Dec 11 10:56:14 crc kubenswrapper[5016]: I1211 10:56:14.362776 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" 
containerID="49f5883716361ecf20e37d0a33857b58813542483a33785fbd7c2c019dd8b594" exitCode=0 Dec 11 10:56:14 crc kubenswrapper[5016]: I1211 10:56:14.363031 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"49f5883716361ecf20e37d0a33857b58813542483a33785fbd7c2c019dd8b594"} Dec 11 10:56:14 crc kubenswrapper[5016]: I1211 10:56:14.363099 5016 scope.go:117] "RemoveContainer" containerID="512f5c783f58cb8b023d09c68e6c5e485f14c303c2f06e1b8d93d73bedfab5d9" Dec 11 10:56:14 crc kubenswrapper[5016]: I1211 10:56:14.378995 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b32cb27a-82a5-4839-b8e9-0197513e6579","Type":"ContainerStarted","Data":"e5e4699a27ccbbca81874649384da549b987f767d1e60101fc9628bd0741ae14"} Dec 11 10:56:14 crc kubenswrapper[5016]: I1211 10:56:14.392806 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.39278118 podStartE2EDuration="6.39278118s" podCreationTimestamp="2025-12-11 10:56:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:14.386687991 +0000 UTC m=+1291.205247580" watchObservedRunningTime="2025-12-11 10:56:14.39278118 +0000 UTC m=+1291.211340759" Dec 11 10:56:15 crc kubenswrapper[5016]: I1211 10:56:15.400126 5016 generic.go:334] "Generic (PLEG): container finished" podID="2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" containerID="3d7049fb9648bac1817b5653c5746b369b4a5f166ddb639ed3b1eb13bd3f837f" exitCode=0 Dec 11 10:56:15 crc kubenswrapper[5016]: I1211 10:56:15.400787 5016 generic.go:334] "Generic (PLEG): container finished" podID="2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" containerID="5d94318ad3b1b98d6da92156cc934838dc103ee7d64e4f81eab12f87ffc53619" exitCode=143 Dec 11 10:56:15 crc kubenswrapper[5016]: I1211 10:56:15.400911 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0","Type":"ContainerDied","Data":"3d7049fb9648bac1817b5653c5746b369b4a5f166ddb639ed3b1eb13bd3f837f"} Dec 11 10:56:15 crc kubenswrapper[5016]: I1211 10:56:15.400969 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0","Type":"ContainerDied","Data":"5d94318ad3b1b98d6da92156cc934838dc103ee7d64e4f81eab12f87ffc53619"} Dec 11 10:56:15 crc kubenswrapper[5016]: I1211 10:56:15.412427 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"53da38b3e027c864a9592c4787654311b819c80dc57e5ec065e90c602166ceee"} Dec 11 10:56:15 crc kubenswrapper[5016]: I1211 10:56:15.423285 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b32cb27a-82a5-4839-b8e9-0197513e6579","Type":"ContainerStarted","Data":"59fbb704553d969f7cfb5717e3272ef950ea18c0ba3cb824200628346558e2b2"} Dec 11 10:56:15 crc kubenswrapper[5016]: I1211 10:56:15.423456 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b32cb27a-82a5-4839-b8e9-0197513e6579" containerName="glance-log" 
containerID="cri-o://e5e4699a27ccbbca81874649384da549b987f767d1e60101fc9628bd0741ae14" gracePeriod=30 Dec 11 10:56:15 crc kubenswrapper[5016]: I1211 10:56:15.423620 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b32cb27a-82a5-4839-b8e9-0197513e6579" containerName="glance-httpd" containerID="cri-o://59fbb704553d969f7cfb5717e3272ef950ea18c0ba3cb824200628346558e2b2" gracePeriod=30 Dec 11 10:56:15 crc kubenswrapper[5016]: I1211 10:56:15.471381 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.471352668 podStartE2EDuration="7.471352668s" podCreationTimestamp="2025-12-11 10:56:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:15.464413348 +0000 UTC m=+1292.282972947" watchObservedRunningTime="2025-12-11 10:56:15.471352668 +0000 UTC m=+1292.289912247" Dec 11 10:56:16 crc kubenswrapper[5016]: I1211 10:56:16.451974 5016 generic.go:334] "Generic (PLEG): container finished" podID="b32cb27a-82a5-4839-b8e9-0197513e6579" containerID="59fbb704553d969f7cfb5717e3272ef950ea18c0ba3cb824200628346558e2b2" exitCode=0 Dec 11 10:56:16 crc kubenswrapper[5016]: I1211 10:56:16.452662 5016 generic.go:334] "Generic (PLEG): container finished" podID="b32cb27a-82a5-4839-b8e9-0197513e6579" containerID="e5e4699a27ccbbca81874649384da549b987f767d1e60101fc9628bd0741ae14" exitCode=143 Dec 11 10:56:16 crc kubenswrapper[5016]: I1211 10:56:16.452165 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b32cb27a-82a5-4839-b8e9-0197513e6579","Type":"ContainerDied","Data":"59fbb704553d969f7cfb5717e3272ef950ea18c0ba3cb824200628346558e2b2"} Dec 11 10:56:16 crc kubenswrapper[5016]: I1211 10:56:16.452722 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b32cb27a-82a5-4839-b8e9-0197513e6579","Type":"ContainerDied","Data":"e5e4699a27ccbbca81874649384da549b987f767d1e60101fc9628bd0741ae14"} Dec 11 10:56:17 crc kubenswrapper[5016]: I1211 10:56:17.467978 5016 generic.go:334] "Generic (PLEG): container finished" podID="52d4105e-ccb5-43e4-bdc3-416d58571c0a" containerID="c13ef5cb6f5b82c13b0c2c2d0484802be6a440acefd3d7d7b1e9a00981f1453c" exitCode=0 Dec 11 10:56:17 crc kubenswrapper[5016]: I1211 10:56:17.467987 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mxdc5" event={"ID":"52d4105e-ccb5-43e4-bdc3-416d58571c0a","Type":"ContainerDied","Data":"c13ef5cb6f5b82c13b0c2c2d0484802be6a440acefd3d7d7b1e9a00981f1453c"} Dec 11 10:56:17 crc kubenswrapper[5016]: I1211 10:56:17.957724 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5f7dbcfb75-ncjth"] Dec 11 10:56:17 crc kubenswrapper[5016]: I1211 10:56:17.986905 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-78bccb96bd-btt5f"] Dec 11 10:56:17 crc kubenswrapper[5016]: I1211 10:56:17.989006 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:17 crc kubenswrapper[5016]: I1211 10:56:17.991017 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.030744 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-78bccb96bd-btt5f"] Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.052051 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5778bcf8bf-cpvlx"] Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.095194 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-horizon-secret-key\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.095257 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlhkq\" (UniqueName: \"kubernetes.io/projected/6f611e53-2b48-4371-8673-dd02e7533a7d-kube-api-access-jlhkq\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.095305 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-combined-ca-bundle\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.095348 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f611e53-2b48-4371-8673-dd02e7533a7d-logs\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.095372 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f611e53-2b48-4371-8673-dd02e7533a7d-scripts\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.095425 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6f611e53-2b48-4371-8673-dd02e7533a7d-config-data\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.095499 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-horizon-tls-certs\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.096840 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7df5fc4844-wdnrz"] Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.109885 
5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.148821 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7df5fc4844-wdnrz"] Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.198594 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-horizon-secret-key\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.198726 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkb24\" (UniqueName: \"kubernetes.io/projected/02741cc6-3a2a-48c1-b492-57762e0d75e6-kube-api-access-mkb24\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.198777 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlhkq\" (UniqueName: \"kubernetes.io/projected/6f611e53-2b48-4371-8673-dd02e7533a7d-kube-api-access-jlhkq\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.198811 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/02741cc6-3a2a-48c1-b492-57762e0d75e6-horizon-tls-certs\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.198867 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02741cc6-3a2a-48c1-b492-57762e0d75e6-combined-ca-bundle\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.198900 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-combined-ca-bundle\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.198981 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f611e53-2b48-4371-8673-dd02e7533a7d-logs\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.199019 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f611e53-2b48-4371-8673-dd02e7533a7d-scripts\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.199108 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/configmap/6f611e53-2b48-4371-8673-dd02e7533a7d-config-data\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.199150 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02741cc6-3a2a-48c1-b492-57762e0d75e6-logs\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.199186 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/02741cc6-3a2a-48c1-b492-57762e0d75e6-config-data\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.199904 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f611e53-2b48-4371-8673-dd02e7533a7d-scripts\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.200888 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6f611e53-2b48-4371-8673-dd02e7533a7d-config-data\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.201368 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f611e53-2b48-4371-8673-dd02e7533a7d-logs\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.201554 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/02741cc6-3a2a-48c1-b492-57762e0d75e6-scripts\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.201640 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/02741cc6-3a2a-48c1-b492-57762e0d75e6-horizon-secret-key\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.201764 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-horizon-tls-certs\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.206921 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-combined-ca-bundle\") pod \"horizon-78bccb96bd-btt5f\" (UID: 
\"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.207617 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-horizon-tls-certs\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.212349 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-horizon-secret-key\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.223903 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlhkq\" (UniqueName: \"kubernetes.io/projected/6f611e53-2b48-4371-8673-dd02e7533a7d-kube-api-access-jlhkq\") pod \"horizon-78bccb96bd-btt5f\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.303291 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/02741cc6-3a2a-48c1-b492-57762e0d75e6-horizon-tls-certs\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.303360 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02741cc6-3a2a-48c1-b492-57762e0d75e6-combined-ca-bundle\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.303490 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02741cc6-3a2a-48c1-b492-57762e0d75e6-logs\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.303520 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/02741cc6-3a2a-48c1-b492-57762e0d75e6-config-data\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.303558 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/02741cc6-3a2a-48c1-b492-57762e0d75e6-scripts\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.303582 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/02741cc6-3a2a-48c1-b492-57762e0d75e6-horizon-secret-key\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.303625 5016 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkb24\" (UniqueName: \"kubernetes.io/projected/02741cc6-3a2a-48c1-b492-57762e0d75e6-kube-api-access-mkb24\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.305265 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/02741cc6-3a2a-48c1-b492-57762e0d75e6-scripts\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.306464 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02741cc6-3a2a-48c1-b492-57762e0d75e6-logs\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.306774 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/02741cc6-3a2a-48c1-b492-57762e0d75e6-config-data\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.308324 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/02741cc6-3a2a-48c1-b492-57762e0d75e6-horizon-tls-certs\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.308890 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02741cc6-3a2a-48c1-b492-57762e0d75e6-combined-ca-bundle\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.310849 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/02741cc6-3a2a-48c1-b492-57762e0d75e6-horizon-secret-key\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.319125 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.337967 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkb24\" (UniqueName: \"kubernetes.io/projected/02741cc6-3a2a-48c1-b492-57762e0d75e6-kube-api-access-mkb24\") pod \"horizon-7df5fc4844-wdnrz\" (UID: \"02741cc6-3a2a-48c1-b492-57762e0d75e6\") " pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:18 crc kubenswrapper[5016]: I1211 10:56:18.443025 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:19 crc kubenswrapper[5016]: I1211 10:56:19.449452 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:56:19 crc kubenswrapper[5016]: I1211 10:56:19.529736 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-79znm"] Dec 11 10:56:19 crc kubenswrapper[5016]: I1211 10:56:19.530018 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" podUID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" containerName="dnsmasq-dns" containerID="cri-o://241091772bb80f5ffba1a61edf4c5ed427a11caf621f26b77372775e4dc50920" gracePeriod=10 Dec 11 10:56:20 crc kubenswrapper[5016]: I1211 10:56:20.511591 5016 generic.go:334] "Generic (PLEG): container finished" podID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" containerID="241091772bb80f5ffba1a61edf4c5ed427a11caf621f26b77372775e4dc50920" exitCode=0 Dec 11 10:56:20 crc kubenswrapper[5016]: I1211 10:56:20.511661 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" event={"ID":"61ddbb14-d8b7-4c38-a398-e0d93aba33db","Type":"ContainerDied","Data":"241091772bb80f5ffba1a61edf4c5ed427a11caf621f26b77372775e4dc50920"} Dec 11 10:56:22 crc kubenswrapper[5016]: I1211 10:56:22.380403 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" podUID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: connect: connection refused" Dec 11 10:56:27 crc kubenswrapper[5016]: I1211 10:56:27.380541 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" podUID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: connect: connection refused" Dec 11 10:56:27 crc kubenswrapper[5016]: E1211 10:56:27.771214 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Dec 11 10:56:27 crc kubenswrapper[5016]: E1211 10:56:27.771428 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ncch5d7h65bh56bh66bh6dh6h657h8dh5fh695h6chd6h67fh699h658h68dh6hbch8dh645h96h646hbfh596h695h664h89hcfhf8h677h55fq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8dwm6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-5778bcf8bf-cpvlx_openstack(188faef9-9759-4921-ba09-08b7a8f84854): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:56:27 crc kubenswrapper[5016]: E1211 10:56:27.781705 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-5778bcf8bf-cpvlx" podUID="188faef9-9759-4921-ba09-08b7a8f84854" Dec 11 10:56:27 crc kubenswrapper[5016]: E1211 10:56:27.800754 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Dec 11 10:56:27 crc kubenswrapper[5016]: E1211 10:56:27.801054 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68bh5bbh6dh58ch5d6h74h57ch5fbh5b5hd7h58fhcbh564hdfh5dch8fh99h676hb5h66bh689h9fh597h68h9bh88h676h5ch85hdfh54ch5cdq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hh5jz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-56459f8665-dvsnq_openstack(91645737-c66c-42cb-8c87-1d7bded844e1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:56:27 crc kubenswrapper[5016]: E1211 10:56:27.803797 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-56459f8665-dvsnq" podUID="91645737-c66c-42cb-8c87-1d7bded844e1" Dec 11 10:56:29 crc kubenswrapper[5016]: E1211 10:56:29.381172 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Dec 11 10:56:29 crc kubenswrapper[5016]: E1211 10:56:29.381498 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cdjwd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-vmt79_openstack(b4b58628-6bc5-4fab-b806-9c1f615c006c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:56:29 crc kubenswrapper[5016]: E1211 10:56:29.382762 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-vmt79" podUID="b4b58628-6bc5-4fab-b806-9c1f615c006c" Dec 11 10:56:29 crc kubenswrapper[5016]: E1211 10:56:29.599493 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-vmt79" podUID="b4b58628-6bc5-4fab-b806-9c1f615c006c" Dec 11 10:56:29 crc kubenswrapper[5016]: E1211 10:56:29.854844 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Dec 11 10:56:29 crc kubenswrapper[5016]: E1211 10:56:29.855525 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68fhb8h5bch585hb9h574h5dh5fdh97h66chbfh57dh5d8h66h5f5h67dh78h66fh54fh5fbh4h679hb8h694h94h679h5d8h5d6h68h5b8h557h56bq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tsn2p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(49507f32-2b67-4dc4-a968-a691ca6c8454): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:56:29 crc kubenswrapper[5016]: E1211 10:56:29.871202 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Dec 11 10:56:29 crc kubenswrapper[5016]: E1211 10:56:29.871452 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n67bh659hb9hfdh8fh59dh58dh687h57dh54fh5f7h67ch55fh5bh94h65hc5h554h5f4hd9h5c5h687h566h67h54ch5c5h64hfdh8ch544h5f8h95q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q6bc5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-5f7dbcfb75-ncjth_openstack(d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 10:56:29 crc kubenswrapper[5016]: E1211 10:56:29.876050 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-5f7dbcfb75-ncjth" podUID="d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1" Dec 11 10:56:29 crc kubenswrapper[5016]: I1211 10:56:29.974996 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:29 crc kubenswrapper[5016]: I1211 10:56:29.980118 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099197 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-config-data\") pod \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099244 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4m4b7\" (UniqueName: \"kubernetes.io/projected/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-kube-api-access-4m4b7\") pod \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099276 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-config-data\") pod \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099317 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-internal-tls-certs\") pod \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099346 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-credential-keys\") pod \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099390 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099496 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-combined-ca-bundle\") pod \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099527 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-logs\") pod \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099630 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dztm\" (UniqueName: \"kubernetes.io/projected/52d4105e-ccb5-43e4-bdc3-416d58571c0a-kube-api-access-9dztm\") pod \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099655 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-fernet-keys\") pod \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\" (UID: 
\"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099676 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-scripts\") pod \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099695 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-combined-ca-bundle\") pod \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099710 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-scripts\") pod \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\" (UID: \"52d4105e-ccb5-43e4-bdc3-416d58571c0a\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.099729 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-httpd-run\") pod \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\" (UID: \"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0\") " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.100571 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-logs" (OuterVolumeSpecName: "logs") pod "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" (UID: "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.100585 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" (UID: "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.109500 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" (UID: "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.110096 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "52d4105e-ccb5-43e4-bdc3-416d58571c0a" (UID: "52d4105e-ccb5-43e4-bdc3-416d58571c0a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.110123 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-scripts" (OuterVolumeSpecName: "scripts") pod "52d4105e-ccb5-43e4-bdc3-416d58571c0a" (UID: "52d4105e-ccb5-43e4-bdc3-416d58571c0a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.112694 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-scripts" (OuterVolumeSpecName: "scripts") pod "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" (UID: "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.112725 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52d4105e-ccb5-43e4-bdc3-416d58571c0a-kube-api-access-9dztm" (OuterVolumeSpecName: "kube-api-access-9dztm") pod "52d4105e-ccb5-43e4-bdc3-416d58571c0a" (UID: "52d4105e-ccb5-43e4-bdc3-416d58571c0a"). InnerVolumeSpecName "kube-api-access-9dztm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.113073 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-kube-api-access-4m4b7" (OuterVolumeSpecName: "kube-api-access-4m4b7") pod "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" (UID: "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0"). InnerVolumeSpecName "kube-api-access-4m4b7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.113228 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "52d4105e-ccb5-43e4-bdc3-416d58571c0a" (UID: "52d4105e-ccb5-43e4-bdc3-416d58571c0a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.131904 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-config-data" (OuterVolumeSpecName: "config-data") pod "52d4105e-ccb5-43e4-bdc3-416d58571c0a" (UID: "52d4105e-ccb5-43e4-bdc3-416d58571c0a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.166567 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-config-data" (OuterVolumeSpecName: "config-data") pod "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" (UID: "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.173159 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" (UID: "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.183653 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "52d4105e-ccb5-43e4-bdc3-416d58571c0a" (UID: "52d4105e-ccb5-43e4-bdc3-416d58571c0a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.186402 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" (UID: "2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.204117 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.204156 5016 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.204172 5016 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.205049 5016 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.205069 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.205084 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.205096 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dztm\" (UniqueName: \"kubernetes.io/projected/52d4105e-ccb5-43e4-bdc3-416d58571c0a-kube-api-access-9dztm\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.205108 5016 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.205119 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.205131 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.205142 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.205152 5016 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.205165 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52d4105e-ccb5-43e4-bdc3-416d58571c0a-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.205176 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4m4b7\" (UniqueName: \"kubernetes.io/projected/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0-kube-api-access-4m4b7\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.242608 5016 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.307770 5016 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.606582 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0","Type":"ContainerDied","Data":"69e9c53f09dfeca68cd6909ca2dd9fdd176f25a7502a21140e03cb25c3633572"} Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.606645 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.608551 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mxdc5" event={"ID":"52d4105e-ccb5-43e4-bdc3-416d58571c0a","Type":"ContainerDied","Data":"e738c21c56e2d760047588d62f9e4b7363ff8b596769b234d8067227dcab0005"} Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.608578 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-mxdc5" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.608590 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e738c21c56e2d760047588d62f9e4b7363ff8b596769b234d8067227dcab0005" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.654612 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.674018 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.689292 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:56:30 crc kubenswrapper[5016]: E1211 10:56:30.689742 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" containerName="glance-httpd" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.689765 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" containerName="glance-httpd" Dec 11 10:56:30 crc kubenswrapper[5016]: E1211 10:56:30.689806 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52d4105e-ccb5-43e4-bdc3-416d58571c0a" containerName="keystone-bootstrap" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.689815 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="52d4105e-ccb5-43e4-bdc3-416d58571c0a" containerName="keystone-bootstrap" Dec 11 10:56:30 crc kubenswrapper[5016]: E1211 10:56:30.689835 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" containerName="glance-log" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.689844 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" containerName="glance-log" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.690063 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" containerName="glance-log" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.690096 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" containerName="glance-httpd" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.690108 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="52d4105e-ccb5-43e4-bdc3-416d58571c0a" containerName="keystone-bootstrap" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.694985 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.701030 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.701215 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.711528 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.814929 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.815019 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.815059 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.815093 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-logs\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.815251 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.815451 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.815585 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.815660 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-46lmr\" (UniqueName: \"kubernetes.io/projected/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-kube-api-access-46lmr\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.917733 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.917822 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.917851 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46lmr\" (UniqueName: \"kubernetes.io/projected/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-kube-api-access-46lmr\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.917885 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.917921 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.917966 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.917988 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-logs\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.918036 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.918212 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.918480 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.918745 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-logs\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.922739 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.923224 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.923764 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.924029 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.934992 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46lmr\" (UniqueName: \"kubernetes.io/projected/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-kube-api-access-46lmr\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:30 crc kubenswrapper[5016]: I1211 10:56:30.958155 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.029227 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.097431 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-mxdc5"] Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.103517 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-mxdc5"] Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.188704 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-ckqjx"] Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.191490 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.196017 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.196281 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.196313 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.197018 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.197959 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-g8dhv" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.202050 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-ckqjx"] Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.325595 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-config-data\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.325956 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-combined-ca-bundle\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.326018 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-fernet-keys\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.326123 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-scripts\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.326173 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znscv\" (UniqueName: 
\"kubernetes.io/projected/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-kube-api-access-znscv\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.326206 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-credential-keys\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.427597 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-config-data\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.427662 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-combined-ca-bundle\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.427737 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-fernet-keys\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.427832 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-scripts\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.427868 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znscv\" (UniqueName: \"kubernetes.io/projected/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-kube-api-access-znscv\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.427887 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-credential-keys\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.434624 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-credential-keys\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.434641 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-fernet-keys\") pod \"keystone-bootstrap-ckqjx\" (UID: 
\"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.435226 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-scripts\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.435958 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-combined-ca-bundle\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.436029 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-config-data\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.445810 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znscv\" (UniqueName: \"kubernetes.io/projected/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-kube-api-access-znscv\") pod \"keystone-bootstrap-ckqjx\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.489748 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0" path="/var/lib/kubelet/pods/2bd6befe-c1c8-42d5-9f8d-093ec14a6ae0/volumes" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.490720 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52d4105e-ccb5-43e4-bdc3-416d58571c0a" path="/var/lib/kubelet/pods/52d4105e-ccb5-43e4-bdc3-416d58571c0a/volumes" Dec 11 10:56:31 crc kubenswrapper[5016]: I1211 10:56:31.522650 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:32 crc kubenswrapper[5016]: I1211 10:56:32.380319 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" podUID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: connect: connection refused" Dec 11 10:56:32 crc kubenswrapper[5016]: I1211 10:56:32.380442 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:56:37 crc kubenswrapper[5016]: I1211 10:56:37.380772 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" podUID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: connect: connection refused" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.367567 5016 scope.go:117] "RemoveContainer" containerID="3d7049fb9648bac1817b5653c5746b369b4a5f166ddb639ed3b1eb13bd3f837f" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.490123 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.506972 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.527882 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.546168 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.565682 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/188faef9-9759-4921-ba09-08b7a8f84854-scripts\") pod \"188faef9-9759-4921-ba09-08b7a8f84854\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.565725 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/188faef9-9759-4921-ba09-08b7a8f84854-logs\") pod \"188faef9-9759-4921-ba09-08b7a8f84854\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.565760 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91645737-c66c-42cb-8c87-1d7bded844e1-logs\") pod \"91645737-c66c-42cb-8c87-1d7bded844e1\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.565801 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/91645737-c66c-42cb-8c87-1d7bded844e1-horizon-secret-key\") pod \"91645737-c66c-42cb-8c87-1d7bded844e1\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.565854 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/91645737-c66c-42cb-8c87-1d7bded844e1-scripts\") pod \"91645737-c66c-42cb-8c87-1d7bded844e1\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.565897 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/91645737-c66c-42cb-8c87-1d7bded844e1-config-data\") pod \"91645737-c66c-42cb-8c87-1d7bded844e1\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.565910 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/188faef9-9759-4921-ba09-08b7a8f84854-horizon-secret-key\") pod \"188faef9-9759-4921-ba09-08b7a8f84854\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.565975 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/188faef9-9759-4921-ba09-08b7a8f84854-config-data\") pod \"188faef9-9759-4921-ba09-08b7a8f84854\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.566001 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-8dwm6\" (UniqueName: \"kubernetes.io/projected/188faef9-9759-4921-ba09-08b7a8f84854-kube-api-access-8dwm6\") pod \"188faef9-9759-4921-ba09-08b7a8f84854\" (UID: \"188faef9-9759-4921-ba09-08b7a8f84854\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.566020 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hh5jz\" (UniqueName: \"kubernetes.io/projected/91645737-c66c-42cb-8c87-1d7bded844e1-kube-api-access-hh5jz\") pod \"91645737-c66c-42cb-8c87-1d7bded844e1\" (UID: \"91645737-c66c-42cb-8c87-1d7bded844e1\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.569610 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/188faef9-9759-4921-ba09-08b7a8f84854-scripts" (OuterVolumeSpecName: "scripts") pod "188faef9-9759-4921-ba09-08b7a8f84854" (UID: "188faef9-9759-4921-ba09-08b7a8f84854"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.569757 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91645737-c66c-42cb-8c87-1d7bded844e1-scripts" (OuterVolumeSpecName: "scripts") pod "91645737-c66c-42cb-8c87-1d7bded844e1" (UID: "91645737-c66c-42cb-8c87-1d7bded844e1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.576608 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/188faef9-9759-4921-ba09-08b7a8f84854-config-data" (OuterVolumeSpecName: "config-data") pod "188faef9-9759-4921-ba09-08b7a8f84854" (UID: "188faef9-9759-4921-ba09-08b7a8f84854"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.577148 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91645737-c66c-42cb-8c87-1d7bded844e1-logs" (OuterVolumeSpecName: "logs") pod "91645737-c66c-42cb-8c87-1d7bded844e1" (UID: "91645737-c66c-42cb-8c87-1d7bded844e1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.584914 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91645737-c66c-42cb-8c87-1d7bded844e1-config-data" (OuterVolumeSpecName: "config-data") pod "91645737-c66c-42cb-8c87-1d7bded844e1" (UID: "91645737-c66c-42cb-8c87-1d7bded844e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.602253 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91645737-c66c-42cb-8c87-1d7bded844e1-kube-api-access-hh5jz" (OuterVolumeSpecName: "kube-api-access-hh5jz") pod "91645737-c66c-42cb-8c87-1d7bded844e1" (UID: "91645737-c66c-42cb-8c87-1d7bded844e1"). InnerVolumeSpecName "kube-api-access-hh5jz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.604665 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91645737-c66c-42cb-8c87-1d7bded844e1-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "91645737-c66c-42cb-8c87-1d7bded844e1" (UID: "91645737-c66c-42cb-8c87-1d7bded844e1"). 
InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.608466 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/188faef9-9759-4921-ba09-08b7a8f84854-logs" (OuterVolumeSpecName: "logs") pod "188faef9-9759-4921-ba09-08b7a8f84854" (UID: "188faef9-9759-4921-ba09-08b7a8f84854"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.612952 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/188faef9-9759-4921-ba09-08b7a8f84854-kube-api-access-8dwm6" (OuterVolumeSpecName: "kube-api-access-8dwm6") pod "188faef9-9759-4921-ba09-08b7a8f84854" (UID: "188faef9-9759-4921-ba09-08b7a8f84854"). InnerVolumeSpecName "kube-api-access-8dwm6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.624438 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/188faef9-9759-4921-ba09-08b7a8f84854-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "188faef9-9759-4921-ba09-08b7a8f84854" (UID: "188faef9-9759-4921-ba09-08b7a8f84854"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669218 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b32cb27a-82a5-4839-b8e9-0197513e6579-logs\") pod \"b32cb27a-82a5-4839-b8e9-0197513e6579\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669272 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-logs\") pod \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669302 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-horizon-secret-key\") pod \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669355 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-config-data\") pod \"b32cb27a-82a5-4839-b8e9-0197513e6579\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669386 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-config-data\") pod \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669422 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-scripts\") pod \"b32cb27a-82a5-4839-b8e9-0197513e6579\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669456 5016 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b32cb27a-82a5-4839-b8e9-0197513e6579-httpd-run\") pod \"b32cb27a-82a5-4839-b8e9-0197513e6579\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669522 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-combined-ca-bundle\") pod \"b32cb27a-82a5-4839-b8e9-0197513e6579\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669543 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-scripts\") pod \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669558 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hd4l\" (UniqueName: \"kubernetes.io/projected/b32cb27a-82a5-4839-b8e9-0197513e6579-kube-api-access-6hd4l\") pod \"b32cb27a-82a5-4839-b8e9-0197513e6579\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669581 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-public-tls-certs\") pod \"b32cb27a-82a5-4839-b8e9-0197513e6579\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669616 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6bc5\" (UniqueName: \"kubernetes.io/projected/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-kube-api-access-q6bc5\") pod \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\" (UID: \"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.669665 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"b32cb27a-82a5-4839-b8e9-0197513e6579\" (UID: \"b32cb27a-82a5-4839-b8e9-0197513e6579\") " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.670021 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/188faef9-9759-4921-ba09-08b7a8f84854-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.670033 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/188faef9-9759-4921-ba09-08b7a8f84854-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.670042 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91645737-c66c-42cb-8c87-1d7bded844e1-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.670051 5016 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/91645737-c66c-42cb-8c87-1d7bded844e1-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.670060 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/91645737-c66c-42cb-8c87-1d7bded844e1-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.670069 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/91645737-c66c-42cb-8c87-1d7bded844e1-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.670078 5016 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/188faef9-9759-4921-ba09-08b7a8f84854-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.670087 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/188faef9-9759-4921-ba09-08b7a8f84854-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.670096 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dwm6\" (UniqueName: \"kubernetes.io/projected/188faef9-9759-4921-ba09-08b7a8f84854-kube-api-access-8dwm6\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.670106 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hh5jz\" (UniqueName: \"kubernetes.io/projected/91645737-c66c-42cb-8c87-1d7bded844e1-kube-api-access-hh5jz\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.671256 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-scripts" (OuterVolumeSpecName: "scripts") pod "d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1" (UID: "d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.677656 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b32cb27a-82a5-4839-b8e9-0197513e6579-logs" (OuterVolumeSpecName: "logs") pod "b32cb27a-82a5-4839-b8e9-0197513e6579" (UID: "b32cb27a-82a5-4839-b8e9-0197513e6579"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.678150 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b32cb27a-82a5-4839-b8e9-0197513e6579-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b32cb27a-82a5-4839-b8e9-0197513e6579" (UID: "b32cb27a-82a5-4839-b8e9-0197513e6579"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.685079 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "b32cb27a-82a5-4839-b8e9-0197513e6579" (UID: "b32cb27a-82a5-4839-b8e9-0197513e6579"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.685308 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-kube-api-access-q6bc5" (OuterVolumeSpecName: "kube-api-access-q6bc5") pod "d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1" (UID: "d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1"). InnerVolumeSpecName "kube-api-access-q6bc5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.687538 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1" (UID: "d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.690259 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-scripts" (OuterVolumeSpecName: "scripts") pod "b32cb27a-82a5-4839-b8e9-0197513e6579" (UID: "b32cb27a-82a5-4839-b8e9-0197513e6579"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.699720 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-logs" (OuterVolumeSpecName: "logs") pod "d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1" (UID: "d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.699993 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-config-data" (OuterVolumeSpecName: "config-data") pod "d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1" (UID: "d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.708706 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b32cb27a-82a5-4839-b8e9-0197513e6579-kube-api-access-6hd4l" (OuterVolumeSpecName: "kube-api-access-6hd4l") pod "b32cb27a-82a5-4839-b8e9-0197513e6579" (UID: "b32cb27a-82a5-4839-b8e9-0197513e6579"). InnerVolumeSpecName "kube-api-access-6hd4l". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.720159 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b32cb27a-82a5-4839-b8e9-0197513e6579" (UID: "b32cb27a-82a5-4839-b8e9-0197513e6579"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.750378 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5778bcf8bf-cpvlx" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.750410 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5778bcf8bf-cpvlx" event={"ID":"188faef9-9759-4921-ba09-08b7a8f84854","Type":"ContainerDied","Data":"f090e344b3181599a2dcd3a168a312c6e0504b04610ce1367d229b55a10b9059"} Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.750376 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-config-data" (OuterVolumeSpecName: "config-data") pod "b32cb27a-82a5-4839-b8e9-0197513e6579" (UID: "b32cb27a-82a5-4839-b8e9-0197513e6579"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.773347 5016 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.773386 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b32cb27a-82a5-4839-b8e9-0197513e6579-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.773400 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.773413 5016 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.773426 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.773436 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.773447 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.773458 5016 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b32cb27a-82a5-4839-b8e9-0197513e6579-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.773468 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.773481 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hd4l\" (UniqueName: \"kubernetes.io/projected/b32cb27a-82a5-4839-b8e9-0197513e6579-kube-api-access-6hd4l\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.773494 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.773504 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6bc5\" (UniqueName: \"kubernetes.io/projected/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1-kube-api-access-q6bc5\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.787701 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b32cb27a-82a5-4839-b8e9-0197513e6579","Type":"ContainerDied","Data":"a7108eac6dc3d3292c38940cf6f92d7f194db21f95da5b9c959530d7bd761995"} Dec 11 10:56:38 crc kubenswrapper[5016]: 
I1211 10:56:38.787793 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.793907 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b32cb27a-82a5-4839-b8e9-0197513e6579" (UID: "b32cb27a-82a5-4839-b8e9-0197513e6579"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.807099 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56459f8665-dvsnq" event={"ID":"91645737-c66c-42cb-8c87-1d7bded844e1","Type":"ContainerDied","Data":"3e6a5d7e8b57a2496fad144b7e1d142e1b5a5322bd10b7f89c0dcaccaa365afd"} Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.807241 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-56459f8665-dvsnq" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.816139 5016 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.823252 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f7dbcfb75-ncjth" event={"ID":"d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1","Type":"ContainerDied","Data":"b64ff7965e401ce86fbd205a8da7d06758d4b8d9637f52703187dd7041c1abe5"} Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.823350 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5f7dbcfb75-ncjth" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.857824 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5778bcf8bf-cpvlx"] Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.870015 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5778bcf8bf-cpvlx"] Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.881569 5016 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b32cb27a-82a5-4839-b8e9-0197513e6579-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.881806 5016 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.892539 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-56459f8665-dvsnq"] Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.921767 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-56459f8665-dvsnq"] Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.938349 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5f7dbcfb75-ncjth"] Dec 11 10:56:38 crc kubenswrapper[5016]: I1211 10:56:38.955464 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5f7dbcfb75-ncjth"] Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.127117 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.137058 5016 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.236313 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 10:56:39 crc kubenswrapper[5016]: E1211 10:56:39.237273 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b32cb27a-82a5-4839-b8e9-0197513e6579" containerName="glance-log" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.237303 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="b32cb27a-82a5-4839-b8e9-0197513e6579" containerName="glance-log" Dec 11 10:56:39 crc kubenswrapper[5016]: E1211 10:56:39.237372 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b32cb27a-82a5-4839-b8e9-0197513e6579" containerName="glance-httpd" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.237392 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="b32cb27a-82a5-4839-b8e9-0197513e6579" containerName="glance-httpd" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.237754 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="b32cb27a-82a5-4839-b8e9-0197513e6579" containerName="glance-log" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.237791 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="b32cb27a-82a5-4839-b8e9-0197513e6579" containerName="glance-httpd" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.239224 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.242701 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.242965 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.256987 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.292678 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-logs\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.292753 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-config-data\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.292860 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.292924 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-scripts\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.292976 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.293017 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pcff\" (UniqueName: \"kubernetes.io/projected/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-kube-api-access-2pcff\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.293115 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.293154 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.395210 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.395296 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-scripts\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.395331 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.395366 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pcff\" (UniqueName: \"kubernetes.io/projected/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-kube-api-access-2pcff\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.395409 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.395428 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.395476 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-logs\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.395501 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-config-data\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.396350 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-logs\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.396657 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.396978 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.403725 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-config-data\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.403799 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-scripts\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.417815 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: 
\"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.419926 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.423991 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pcff\" (UniqueName: \"kubernetes.io/projected/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-kube-api-access-2pcff\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.440470 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") " pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.484417 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="188faef9-9759-4921-ba09-08b7a8f84854" path="/var/lib/kubelet/pods/188faef9-9759-4921-ba09-08b7a8f84854/volumes" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.484878 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91645737-c66c-42cb-8c87-1d7bded844e1" path="/var/lib/kubelet/pods/91645737-c66c-42cb-8c87-1d7bded844e1/volumes" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.485355 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b32cb27a-82a5-4839-b8e9-0197513e6579" path="/var/lib/kubelet/pods/b32cb27a-82a5-4839-b8e9-0197513e6579/volumes" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.486543 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1" path="/var/lib/kubelet/pods/d374fc64-fa7d-476d-8ab5-5d2f4bcd26d1/volumes" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.607545 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.832418 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-jnv4g" event={"ID":"cf05bc17-f548-45a7-a1c1-eb32b12957d2","Type":"ContainerDied","Data":"cb6cc762b8d5740c91ba7612b7c34ba2f706e69d6383afe3c9fdfc6e22c5abc8"} Dec 11 10:56:39 crc kubenswrapper[5016]: I1211 10:56:39.833107 5016 generic.go:334] "Generic (PLEG): container finished" podID="cf05bc17-f548-45a7-a1c1-eb32b12957d2" containerID="cb6cc762b8d5740c91ba7612b7c34ba2f706e69d6383afe3c9fdfc6e22c5abc8" exitCode=0 Dec 11 10:56:40 crc kubenswrapper[5016]: E1211 10:56:40.029054 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Dec 11 10:56:40 crc kubenswrapper[5016]: E1211 10:56:40.029980 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6bdh2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-dgkjf_openstack(d31ff49c-2515-4b93-b3b8-e776e3190ab7): ErrImagePull: rpc error: code = Canceled desc = copying 
config: context canceled" logger="UnhandledError" Dec 11 10:56:40 crc kubenswrapper[5016]: E1211 10:56:40.031218 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-dgkjf" podUID="d31ff49c-2515-4b93-b3b8-e776e3190ab7" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.106382 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.213086 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-dns-swift-storage-0\") pod \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.213133 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-dns-svc\") pod \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.213186 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-ovsdbserver-sb\") pod \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.213277 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-957k6\" (UniqueName: \"kubernetes.io/projected/61ddbb14-d8b7-4c38-a398-e0d93aba33db-kube-api-access-957k6\") pod \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.213336 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-ovsdbserver-nb\") pod \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.213392 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-config\") pod \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\" (UID: \"61ddbb14-d8b7-4c38-a398-e0d93aba33db\") " Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.217511 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61ddbb14-d8b7-4c38-a398-e0d93aba33db-kube-api-access-957k6" (OuterVolumeSpecName: "kube-api-access-957k6") pod "61ddbb14-d8b7-4c38-a398-e0d93aba33db" (UID: "61ddbb14-d8b7-4c38-a398-e0d93aba33db"). InnerVolumeSpecName "kube-api-access-957k6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.260970 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "61ddbb14-d8b7-4c38-a398-e0d93aba33db" (UID: "61ddbb14-d8b7-4c38-a398-e0d93aba33db"). 
InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.266312 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "61ddbb14-d8b7-4c38-a398-e0d93aba33db" (UID: "61ddbb14-d8b7-4c38-a398-e0d93aba33db"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.267535 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-config" (OuterVolumeSpecName: "config") pod "61ddbb14-d8b7-4c38-a398-e0d93aba33db" (UID: "61ddbb14-d8b7-4c38-a398-e0d93aba33db"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.274980 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "61ddbb14-d8b7-4c38-a398-e0d93aba33db" (UID: "61ddbb14-d8b7-4c38-a398-e0d93aba33db"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.275164 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "61ddbb14-d8b7-4c38-a398-e0d93aba33db" (UID: "61ddbb14-d8b7-4c38-a398-e0d93aba33db"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.316154 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.316195 5016 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.316209 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.316219 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.316230 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-957k6\" (UniqueName: \"kubernetes.io/projected/61ddbb14-d8b7-4c38-a398-e0d93aba33db-kube-api-access-957k6\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.316240 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61ddbb14-d8b7-4c38-a398-e0d93aba33db-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.449437 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/horizon-7df5fc4844-wdnrz"] Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.463724 5016 scope.go:117] "RemoveContainer" containerID="5d94318ad3b1b98d6da92156cc934838dc103ee7d64e4f81eab12f87ffc53619" Dec 11 10:56:40 crc kubenswrapper[5016]: W1211 10:56:40.496490 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod02741cc6_3a2a_48c1_b492_57762e0d75e6.slice/crio-808852d4e00de22a140f3795f8a3b12df847e5dc9b2f420d833a15349381aa70 WatchSource:0}: Error finding container 808852d4e00de22a140f3795f8a3b12df847e5dc9b2f420d833a15349381aa70: Status 404 returned error can't find the container with id 808852d4e00de22a140f3795f8a3b12df847e5dc9b2f420d833a15349381aa70 Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.614125 5016 scope.go:117] "RemoveContainer" containerID="59fbb704553d969f7cfb5717e3272ef950ea18c0ba3cb824200628346558e2b2" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.682093 5016 scope.go:117] "RemoveContainer" containerID="e5e4699a27ccbbca81874649384da549b987f767d1e60101fc9628bd0741ae14" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.843686 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-78bccb96bd-btt5f"] Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.846428 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7df5fc4844-wdnrz" event={"ID":"02741cc6-3a2a-48c1-b492-57762e0d75e6","Type":"ContainerStarted","Data":"808852d4e00de22a140f3795f8a3b12df847e5dc9b2f420d833a15349381aa70"} Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.857294 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49507f32-2b67-4dc4-a968-a691ca6c8454","Type":"ContainerStarted","Data":"e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3"} Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.862001 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" event={"ID":"61ddbb14-d8b7-4c38-a398-e0d93aba33db","Type":"ContainerDied","Data":"058037da95b5cfa3ee15b9a86dc2dc941ffee76a641968c2c09b0d105e9341c3"} Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.862030 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-79znm" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.862046 5016 scope.go:117] "RemoveContainer" containerID="241091772bb80f5ffba1a61edf4c5ed427a11caf621f26b77372775e4dc50920" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.868213 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-44rj9" event={"ID":"ae211270-86fb-4d5e-a028-49d60d9a6685","Type":"ContainerStarted","Data":"9da786d94b83b298510438b22c45c1c64b2533494fe7d6bde5b66f709c2d31bc"} Dec 11 10:56:40 crc kubenswrapper[5016]: E1211 10:56:40.877394 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-dgkjf" podUID="d31ff49c-2515-4b93-b3b8-e776e3190ab7" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.916143 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-44rj9" podStartSLOduration=3.3064264 podStartE2EDuration="32.916124144s" podCreationTimestamp="2025-12-11 10:56:08 +0000 UTC" firstStartedPulling="2025-12-11 10:56:10.399588523 +0000 UTC m=+1287.218148092" lastFinishedPulling="2025-12-11 10:56:40.009286257 +0000 UTC m=+1316.827845836" observedRunningTime="2025-12-11 10:56:40.895320236 +0000 UTC m=+1317.713879815" watchObservedRunningTime="2025-12-11 10:56:40.916124144 +0000 UTC m=+1317.734683723" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.925243 5016 scope.go:117] "RemoveContainer" containerID="b9cd0f101da872ceef563416b1dd8a66791cd3d2765ceb9ef8d43c64a9f0181e" Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.941910 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-79znm"] Dec 11 10:56:40 crc kubenswrapper[5016]: I1211 10:56:40.951639 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-79znm"] Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.073101 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-ckqjx"] Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.097594 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 10:56:41 crc kubenswrapper[5016]: W1211 10:56:41.119763 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbc3294fd_9e5a_4075_8f7a_fa3c4b20c057.slice/crio-720b688741a3a8965149a5d19d96af94aa2b1d68421857bdb9bac472e98ae5d8 WatchSource:0}: Error finding container 720b688741a3a8965149a5d19d96af94aa2b1d68421857bdb9bac472e98ae5d8: Status 404 returned error can't find the container with id 720b688741a3a8965149a5d19d96af94aa2b1d68421857bdb9bac472e98ae5d8 Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.184686 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:56:41 crc kubenswrapper[5016]: W1211 10:56:41.193548 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8542c44c_4c37_431f_a2b1_7ff93d36f4d0.slice/crio-6adca5ccd64e20654e9fc2d5e5963437605ee4aa9cb93fc95fdae6391b3e70ad WatchSource:0}: Error finding container 6adca5ccd64e20654e9fc2d5e5963437605ee4aa9cb93fc95fdae6391b3e70ad: Status 404 returned error can't 
find the container with id 6adca5ccd64e20654e9fc2d5e5963437605ee4aa9cb93fc95fdae6391b3e70ad Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.216528 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.345527 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgsp6\" (UniqueName: \"kubernetes.io/projected/cf05bc17-f548-45a7-a1c1-eb32b12957d2-kube-api-access-xgsp6\") pod \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\" (UID: \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\") " Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.345787 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf05bc17-f548-45a7-a1c1-eb32b12957d2-combined-ca-bundle\") pod \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\" (UID: \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\") " Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.345963 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cf05bc17-f548-45a7-a1c1-eb32b12957d2-config\") pod \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\" (UID: \"cf05bc17-f548-45a7-a1c1-eb32b12957d2\") " Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.361226 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf05bc17-f548-45a7-a1c1-eb32b12957d2-kube-api-access-xgsp6" (OuterVolumeSpecName: "kube-api-access-xgsp6") pod "cf05bc17-f548-45a7-a1c1-eb32b12957d2" (UID: "cf05bc17-f548-45a7-a1c1-eb32b12957d2"). InnerVolumeSpecName "kube-api-access-xgsp6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.397032 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf05bc17-f548-45a7-a1c1-eb32b12957d2-config" (OuterVolumeSpecName: "config") pod "cf05bc17-f548-45a7-a1c1-eb32b12957d2" (UID: "cf05bc17-f548-45a7-a1c1-eb32b12957d2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.403815 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf05bc17-f548-45a7-a1c1-eb32b12957d2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cf05bc17-f548-45a7-a1c1-eb32b12957d2" (UID: "cf05bc17-f548-45a7-a1c1-eb32b12957d2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.447831 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgsp6\" (UniqueName: \"kubernetes.io/projected/cf05bc17-f548-45a7-a1c1-eb32b12957d2-kube-api-access-xgsp6\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.447878 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf05bc17-f548-45a7-a1c1-eb32b12957d2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.447894 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/cf05bc17-f548-45a7-a1c1-eb32b12957d2-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.530544 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" path="/var/lib/kubelet/pods/61ddbb14-d8b7-4c38-a398-e0d93aba33db/volumes" Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.903365 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7df5fc4844-wdnrz" event={"ID":"02741cc6-3a2a-48c1-b492-57762e0d75e6","Type":"ContainerStarted","Data":"6a8ef6ee22c7d86fd03132989ee618464627b699d01864b6d8f9100fc3b0f2af"} Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.905368 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8542c44c-4c37-431f-a2b1-7ff93d36f4d0","Type":"ContainerStarted","Data":"6adca5ccd64e20654e9fc2d5e5963437605ee4aa9cb93fc95fdae6391b3e70ad"} Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.911846 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78bccb96bd-btt5f" event={"ID":"6f611e53-2b48-4371-8673-dd02e7533a7d","Type":"ContainerStarted","Data":"abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900"} Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.912156 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78bccb96bd-btt5f" event={"ID":"6f611e53-2b48-4371-8673-dd02e7533a7d","Type":"ContainerStarted","Data":"f69afe42f958ebec2fad40234594d228395f2d8f455c349eefce2aa0c072282f"} Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.917710 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-ckqjx" event={"ID":"1091d9e0-69c8-499d-bf06-7aacc52d8ec6","Type":"ContainerStarted","Data":"ccfcb3457a53b85eb1d0d770f71d50169d52f4bc04e8c43a3ff5c9bb83231090"} Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.917764 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-ckqjx" event={"ID":"1091d9e0-69c8-499d-bf06-7aacc52d8ec6","Type":"ContainerStarted","Data":"29ea3cc4b919e3d9b90553a34fd7fd1cc2fa2c26123de04e40223720488d38da"} Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.922429 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057","Type":"ContainerStarted","Data":"720b688741a3a8965149a5d19d96af94aa2b1d68421857bdb9bac472e98ae5d8"} Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.926712 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-jnv4g" Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.927185 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-jnv4g" event={"ID":"cf05bc17-f548-45a7-a1c1-eb32b12957d2","Type":"ContainerDied","Data":"8c7e176f190fa0f3cd9adb64d746b62b13612906e538492fe864cdac5e6662f0"} Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.927234 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c7e176f190fa0f3cd9adb64d746b62b13612906e538492fe864cdac5e6662f0" Dec 11 10:56:41 crc kubenswrapper[5016]: I1211 10:56:41.938930 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-ckqjx" podStartSLOduration=10.938912067 podStartE2EDuration="10.938912067s" podCreationTimestamp="2025-12-11 10:56:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:41.935771971 +0000 UTC m=+1318.754331550" watchObservedRunningTime="2025-12-11 10:56:41.938912067 +0000 UTC m=+1318.757471636" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.092292 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-g7htz"] Dec 11 10:56:42 crc kubenswrapper[5016]: E1211 10:56:42.092756 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf05bc17-f548-45a7-a1c1-eb32b12957d2" containerName="neutron-db-sync" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.092773 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf05bc17-f548-45a7-a1c1-eb32b12957d2" containerName="neutron-db-sync" Dec 11 10:56:42 crc kubenswrapper[5016]: E1211 10:56:42.092795 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" containerName="init" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.092803 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" containerName="init" Dec 11 10:56:42 crc kubenswrapper[5016]: E1211 10:56:42.092823 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" containerName="dnsmasq-dns" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.092830 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" containerName="dnsmasq-dns" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.093109 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="61ddbb14-d8b7-4c38-a398-e0d93aba33db" containerName="dnsmasq-dns" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.093130 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf05bc17-f548-45a7-a1c1-eb32b12957d2" containerName="neutron-db-sync" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.094193 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.123989 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-g7htz"] Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.137701 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-76bc74566d-xxk9f"] Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.164709 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.164780 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.164916 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.165514 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-dns-svc\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.165545 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2nvd\" (UniqueName: \"kubernetes.io/projected/75099d8b-eafe-4f65-867b-877541873100-kube-api-access-p2nvd\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.165571 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-config\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.173377 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.174455 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-76bc74566d-xxk9f"] Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.180296 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.180643 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-j2cht" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.180768 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.180879 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.267739 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.268122 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-config\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.268164 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.268185 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-ovndb-tls-certs\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.268206 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-combined-ca-bundle\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.268361 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.268427 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-dns-svc\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: 
\"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.268453 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2nvd\" (UniqueName: \"kubernetes.io/projected/75099d8b-eafe-4f65-867b-877541873100-kube-api-access-p2nvd\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.268496 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-config\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.268535 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-httpd-config\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.268564 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlh2z\" (UniqueName: \"kubernetes.io/projected/4b4a7e5c-045f-434d-8744-60b045803cc3-kube-api-access-jlh2z\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.270646 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-config\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.270722 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.270928 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.271293 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-dns-svc\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.271800 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " 
pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.298917 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2nvd\" (UniqueName: \"kubernetes.io/projected/75099d8b-eafe-4f65-867b-877541873100-kube-api-access-p2nvd\") pod \"dnsmasq-dns-6b7b667979-g7htz\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.371016 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-httpd-config\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.371111 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlh2z\" (UniqueName: \"kubernetes.io/projected/4b4a7e5c-045f-434d-8744-60b045803cc3-kube-api-access-jlh2z\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.371194 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-config\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.371254 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-ovndb-tls-certs\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.371281 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-combined-ca-bundle\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.380350 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-ovndb-tls-certs\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.382249 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-config\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.382628 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-combined-ca-bundle\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.391045 5016 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-httpd-config\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.397582 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlh2z\" (UniqueName: \"kubernetes.io/projected/4b4a7e5c-045f-434d-8744-60b045803cc3-kube-api-access-jlh2z\") pod \"neutron-76bc74566d-xxk9f\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.468271 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.514705 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:42 crc kubenswrapper[5016]: I1211 10:56:42.979714 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78bccb96bd-btt5f" event={"ID":"6f611e53-2b48-4371-8673-dd02e7533a7d","Type":"ContainerStarted","Data":"ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887"} Dec 11 10:56:43 crc kubenswrapper[5016]: I1211 10:56:43.003138 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057","Type":"ContainerStarted","Data":"5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec"} Dec 11 10:56:43 crc kubenswrapper[5016]: I1211 10:56:43.007026 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-78bccb96bd-btt5f" podStartSLOduration=25.40517537 podStartE2EDuration="26.007010729s" podCreationTimestamp="2025-12-11 10:56:17 +0000 UTC" firstStartedPulling="2025-12-11 10:56:40.866400038 +0000 UTC m=+1317.684959617" lastFinishedPulling="2025-12-11 10:56:41.468235397 +0000 UTC m=+1318.286794976" observedRunningTime="2025-12-11 10:56:43.005335318 +0000 UTC m=+1319.823894907" watchObservedRunningTime="2025-12-11 10:56:43.007010729 +0000 UTC m=+1319.825570318" Dec 11 10:56:43 crc kubenswrapper[5016]: I1211 10:56:43.010616 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7df5fc4844-wdnrz" event={"ID":"02741cc6-3a2a-48c1-b492-57762e0d75e6","Type":"ContainerStarted","Data":"9c94bf98e6c198d5076522b6dc9d9aa9ee1355a1ddd63824cab89098cc3377ce"} Dec 11 10:56:43 crc kubenswrapper[5016]: I1211 10:56:43.040278 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7df5fc4844-wdnrz" podStartSLOduration=24.266223394 podStartE2EDuration="25.040248242s" podCreationTimestamp="2025-12-11 10:56:18 +0000 UTC" firstStartedPulling="2025-12-11 10:56:40.529302875 +0000 UTC m=+1317.347862454" lastFinishedPulling="2025-12-11 10:56:41.303327723 +0000 UTC m=+1318.121887302" observedRunningTime="2025-12-11 10:56:43.037498005 +0000 UTC m=+1319.856057594" watchObservedRunningTime="2025-12-11 10:56:43.040248242 +0000 UTC m=+1319.858807821" Dec 11 10:56:43 crc kubenswrapper[5016]: I1211 10:56:43.044450 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8542c44c-4c37-431f-a2b1-7ff93d36f4d0","Type":"ContainerStarted","Data":"1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb"} Dec 11 10:56:43 crc 
kubenswrapper[5016]: I1211 10:56:43.104617 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-g7htz"] Dec 11 10:56:43 crc kubenswrapper[5016]: I1211 10:56:43.348155 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-76bc74566d-xxk9f"] Dec 11 10:56:43 crc kubenswrapper[5016]: W1211 10:56:43.385219 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b4a7e5c_045f_434d_8744_60b045803cc3.slice/crio-8649f8e1e19d247897aa0461860423d1df0332ca108cbffa5205e02012a6eb6e WatchSource:0}: Error finding container 8649f8e1e19d247897aa0461860423d1df0332ca108cbffa5205e02012a6eb6e: Status 404 returned error can't find the container with id 8649f8e1e19d247897aa0461860423d1df0332ca108cbffa5205e02012a6eb6e Dec 11 10:56:44 crc kubenswrapper[5016]: I1211 10:56:44.098227 5016 generic.go:334] "Generic (PLEG): container finished" podID="75099d8b-eafe-4f65-867b-877541873100" containerID="73c28a2a270e0fb07059ba6c76b150e10a472dad21103d5220aa9bb4f862c17d" exitCode=0 Dec 11 10:56:44 crc kubenswrapper[5016]: I1211 10:56:44.098723 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" event={"ID":"75099d8b-eafe-4f65-867b-877541873100","Type":"ContainerDied","Data":"73c28a2a270e0fb07059ba6c76b150e10a472dad21103d5220aa9bb4f862c17d"} Dec 11 10:56:44 crc kubenswrapper[5016]: I1211 10:56:44.099195 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" event={"ID":"75099d8b-eafe-4f65-867b-877541873100","Type":"ContainerStarted","Data":"122c5af2c647aabdbad104c11af20d0eb384925785cbb3e8a98cfc353cb6218d"} Dec 11 10:56:44 crc kubenswrapper[5016]: I1211 10:56:44.143406 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8542c44c-4c37-431f-a2b1-7ff93d36f4d0","Type":"ContainerStarted","Data":"5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d"} Dec 11 10:56:44 crc kubenswrapper[5016]: I1211 10:56:44.151036 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-76bc74566d-xxk9f" event={"ID":"4b4a7e5c-045f-434d-8744-60b045803cc3","Type":"ContainerStarted","Data":"e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205"} Dec 11 10:56:44 crc kubenswrapper[5016]: I1211 10:56:44.151074 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-76bc74566d-xxk9f" event={"ID":"4b4a7e5c-045f-434d-8744-60b045803cc3","Type":"ContainerStarted","Data":"8649f8e1e19d247897aa0461860423d1df0332ca108cbffa5205e02012a6eb6e"} Dec 11 10:56:44 crc kubenswrapper[5016]: I1211 10:56:44.154688 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057","Type":"ContainerStarted","Data":"12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5"} Dec 11 10:56:44 crc kubenswrapper[5016]: I1211 10:56:44.183015 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=14.182993689 podStartE2EDuration="14.182993689s" podCreationTimestamp="2025-12-11 10:56:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:44.178755115 +0000 UTC m=+1320.997314694" watchObservedRunningTime="2025-12-11 10:56:44.182993689 +0000 UTC m=+1321.001553268" 
Dec 11 10:56:44 crc kubenswrapper[5016]: I1211 10:56:44.206907 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.206890063 podStartE2EDuration="5.206890063s" podCreationTimestamp="2025-12-11 10:56:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:44.203145432 +0000 UTC m=+1321.021705021" watchObservedRunningTime="2025-12-11 10:56:44.206890063 +0000 UTC m=+1321.025449642" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.173714 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" event={"ID":"75099d8b-eafe-4f65-867b-877541873100","Type":"ContainerStarted","Data":"3c1d6fdc8a85e6dc92f96850e8bcffefe8c3c565d06b45c8cc194affe83a50d8"} Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.177290 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.179564 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-76bc74566d-xxk9f" event={"ID":"4b4a7e5c-045f-434d-8744-60b045803cc3","Type":"ContainerStarted","Data":"55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7"} Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.206598 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" podStartSLOduration=3.206573171 podStartE2EDuration="3.206573171s" podCreationTimestamp="2025-12-11 10:56:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:45.202064351 +0000 UTC m=+1322.020623950" watchObservedRunningTime="2025-12-11 10:56:45.206573171 +0000 UTC m=+1322.025132750" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.244515 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-76bc74566d-xxk9f" podStartSLOduration=3.244491839 podStartE2EDuration="3.244491839s" podCreationTimestamp="2025-12-11 10:56:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:45.232065905 +0000 UTC m=+1322.050625494" watchObservedRunningTime="2025-12-11 10:56:45.244491839 +0000 UTC m=+1322.063051418" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.646475 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5d4bc555dc-hjmj8"] Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.648184 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.654782 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.655179 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.686547 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5d4bc555dc-hjmj8"] Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.711763 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-ovndb-tls-certs\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.711832 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx8km\" (UniqueName: \"kubernetes.io/projected/a420329b-5657-402b-8b2c-c6f53beda0d6-kube-api-access-vx8km\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.711861 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-combined-ca-bundle\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.712079 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-public-tls-certs\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.712302 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-config\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.712440 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-httpd-config\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.712492 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-internal-tls-certs\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.815526 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-internal-tls-certs\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.815798 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-ovndb-tls-certs\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.815995 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx8km\" (UniqueName: \"kubernetes.io/projected/a420329b-5657-402b-8b2c-c6f53beda0d6-kube-api-access-vx8km\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.816041 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-combined-ca-bundle\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.816098 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-public-tls-certs\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.816229 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-config\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.816274 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-httpd-config\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.825457 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-internal-tls-certs\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.826603 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-httpd-config\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.829409 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-ovndb-tls-certs\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: 
\"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.832761 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-combined-ca-bundle\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.834966 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx8km\" (UniqueName: \"kubernetes.io/projected/a420329b-5657-402b-8b2c-c6f53beda0d6-kube-api-access-vx8km\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.847060 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-config\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.854071 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a420329b-5657-402b-8b2c-c6f53beda0d6-public-tls-certs\") pod \"neutron-5d4bc555dc-hjmj8\" (UID: \"a420329b-5657-402b-8b2c-c6f53beda0d6\") " pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:45 crc kubenswrapper[5016]: I1211 10:56:45.982269 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:46 crc kubenswrapper[5016]: I1211 10:56:46.212453 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vmt79" event={"ID":"b4b58628-6bc5-4fab-b806-9c1f615c006c","Type":"ContainerStarted","Data":"5b79cda94f5111ba9acbe94ca34b9803ca098509090c7de1d134798fe1282cb7"} Dec 11 10:56:46 crc kubenswrapper[5016]: I1211 10:56:46.217774 5016 generic.go:334] "Generic (PLEG): container finished" podID="ae211270-86fb-4d5e-a028-49d60d9a6685" containerID="9da786d94b83b298510438b22c45c1c64b2533494fe7d6bde5b66f709c2d31bc" exitCode=0 Dec 11 10:56:46 crc kubenswrapper[5016]: I1211 10:56:46.218268 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-44rj9" event={"ID":"ae211270-86fb-4d5e-a028-49d60d9a6685","Type":"ContainerDied","Data":"9da786d94b83b298510438b22c45c1c64b2533494fe7d6bde5b66f709c2d31bc"} Dec 11 10:56:46 crc kubenswrapper[5016]: I1211 10:56:46.218974 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:56:46 crc kubenswrapper[5016]: I1211 10:56:46.239327 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-vmt79" podStartSLOduration=4.52863835 podStartE2EDuration="38.239304808s" podCreationTimestamp="2025-12-11 10:56:08 +0000 UTC" firstStartedPulling="2025-12-11 10:56:10.767558371 +0000 UTC m=+1287.586117940" lastFinishedPulling="2025-12-11 10:56:44.478224819 +0000 UTC m=+1321.296784398" observedRunningTime="2025-12-11 10:56:46.22915475 +0000 UTC m=+1323.047714329" watchObservedRunningTime="2025-12-11 10:56:46.239304808 +0000 UTC m=+1323.057864387" Dec 11 10:56:47 crc kubenswrapper[5016]: I1211 10:56:47.235740 5016 generic.go:334] "Generic (PLEG): container finished" 
podID="1091d9e0-69c8-499d-bf06-7aacc52d8ec6" containerID="ccfcb3457a53b85eb1d0d770f71d50169d52f4bc04e8c43a3ff5c9bb83231090" exitCode=0 Dec 11 10:56:47 crc kubenswrapper[5016]: I1211 10:56:47.235823 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-ckqjx" event={"ID":"1091d9e0-69c8-499d-bf06-7aacc52d8ec6","Type":"ContainerDied","Data":"ccfcb3457a53b85eb1d0d770f71d50169d52f4bc04e8c43a3ff5c9bb83231090"} Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.249354 5016 generic.go:334] "Generic (PLEG): container finished" podID="b4b58628-6bc5-4fab-b806-9c1f615c006c" containerID="5b79cda94f5111ba9acbe94ca34b9803ca098509090c7de1d134798fe1282cb7" exitCode=0 Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.249458 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vmt79" event={"ID":"b4b58628-6bc5-4fab-b806-9c1f615c006c","Type":"ContainerDied","Data":"5b79cda94f5111ba9acbe94ca34b9803ca098509090c7de1d134798fe1282cb7"} Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.319549 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.319602 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.448235 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.448870 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.618982 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.693784 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae211270-86fb-4d5e-a028-49d60d9a6685-combined-ca-bundle\") pod \"ae211270-86fb-4d5e-a028-49d60d9a6685\" (UID: \"ae211270-86fb-4d5e-a028-49d60d9a6685\") " Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.693886 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfjhw\" (UniqueName: \"kubernetes.io/projected/ae211270-86fb-4d5e-a028-49d60d9a6685-kube-api-access-rfjhw\") pod \"ae211270-86fb-4d5e-a028-49d60d9a6685\" (UID: \"ae211270-86fb-4d5e-a028-49d60d9a6685\") " Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.694050 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ae211270-86fb-4d5e-a028-49d60d9a6685-db-sync-config-data\") pod \"ae211270-86fb-4d5e-a028-49d60d9a6685\" (UID: \"ae211270-86fb-4d5e-a028-49d60d9a6685\") " Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.702256 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae211270-86fb-4d5e-a028-49d60d9a6685-kube-api-access-rfjhw" (OuterVolumeSpecName: "kube-api-access-rfjhw") pod "ae211270-86fb-4d5e-a028-49d60d9a6685" (UID: "ae211270-86fb-4d5e-a028-49d60d9a6685"). InnerVolumeSpecName "kube-api-access-rfjhw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.703465 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae211270-86fb-4d5e-a028-49d60d9a6685-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ae211270-86fb-4d5e-a028-49d60d9a6685" (UID: "ae211270-86fb-4d5e-a028-49d60d9a6685"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.728234 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae211270-86fb-4d5e-a028-49d60d9a6685-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ae211270-86fb-4d5e-a028-49d60d9a6685" (UID: "ae211270-86fb-4d5e-a028-49d60d9a6685"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.796082 5016 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ae211270-86fb-4d5e-a028-49d60d9a6685-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.796126 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae211270-86fb-4d5e-a028-49d60d9a6685-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:48 crc kubenswrapper[5016]: I1211 10:56:48.796143 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfjhw\" (UniqueName: \"kubernetes.io/projected/ae211270-86fb-4d5e-a028-49d60d9a6685-kube-api-access-rfjhw\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.276700 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-44rj9" event={"ID":"ae211270-86fb-4d5e-a028-49d60d9a6685","Type":"ContainerDied","Data":"9fb64f12f09e27aba6ec46839d20a1bab1cc7d75cb22617a6541b450e320d13b"} Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.276771 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9fb64f12f09e27aba6ec46839d20a1bab1cc7d75cb22617a6541b450e320d13b" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.277048 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-44rj9" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.608395 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.610538 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.677006 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.686565 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.866723 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-559df4c4fd-rpdct"] Dec 11 10:56:49 crc kubenswrapper[5016]: E1211 10:56:49.867388 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae211270-86fb-4d5e-a028-49d60d9a6685" containerName="barbican-db-sync" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.867414 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae211270-86fb-4d5e-a028-49d60d9a6685" containerName="barbican-db-sync" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.867700 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae211270-86fb-4d5e-a028-49d60d9a6685" containerName="barbican-db-sync" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.869002 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.876561 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.876736 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-wqc95" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.876855 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.877844 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-5f9f9b6559-f78rz"] Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.879457 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.899565 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.911048 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-559df4c4fd-rpdct"] Dec 11 10:56:49 crc kubenswrapper[5016]: I1211 10:56:49.927461 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5f9f9b6559-f78rz"] Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.019458 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-config-data\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.019548 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-config-data\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.019573 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsqzf\" (UniqueName: \"kubernetes.io/projected/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-kube-api-access-gsqzf\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.019599 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-combined-ca-bundle\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.019623 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-config-data-custom\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.019663 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-logs\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.019732 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-config-data-custom\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " 
pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.019752 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhg9l\" (UniqueName: \"kubernetes.io/projected/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-kube-api-access-rhg9l\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.019776 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-combined-ca-bundle\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.019796 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-logs\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.097990 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-g7htz"] Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.098594 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" podUID="75099d8b-eafe-4f65-867b-877541873100" containerName="dnsmasq-dns" containerID="cri-o://3c1d6fdc8a85e6dc92f96850e8bcffefe8c3c565d06b45c8cc194affe83a50d8" gracePeriod=10 Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.101167 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.124334 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-config-data\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.124617 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-config-data\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.124681 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsqzf\" (UniqueName: \"kubernetes.io/projected/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-kube-api-access-gsqzf\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.124735 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-combined-ca-bundle\") pod 
\"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.124768 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-config-data-custom\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.124897 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-logs\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.125100 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-config-data-custom\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.125143 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhg9l\" (UniqueName: \"kubernetes.io/projected/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-kube-api-access-rhg9l\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.125205 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-combined-ca-bundle\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.125245 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-logs\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.126203 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-logs\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.130912 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-logs\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.144686 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-config-data\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.145651 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-config-data-custom\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.147458 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-config-data\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.148616 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-config-data-custom\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.154694 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsqzf\" (UniqueName: \"kubernetes.io/projected/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-kube-api-access-gsqzf\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.158207 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-pd9p5"] Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.165101 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad5059d-bfd5-4ea8-8d6a-898cd592e49d-combined-ca-bundle\") pod \"barbican-keystone-listener-559df4c4fd-rpdct\" (UID: \"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d\") " pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.172643 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-pd9p5"] Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.172748 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.178157 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-combined-ca-bundle\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.182915 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhg9l\" (UniqueName: \"kubernetes.io/projected/266c2ca6-fea6-4f3d-8796-bd0db83f2bf0-kube-api-access-rhg9l\") pod \"barbican-worker-5f9f9b6559-f78rz\" (UID: \"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0\") " pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.211349 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.224192 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5f9f9b6559-f78rz" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.278881 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7d75dc7f86-8kj4j"] Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.286742 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.290410 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.296628 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7d75dc7f86-8kj4j"] Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.341367 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.341412 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.347315 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.347405 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.347488 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 
10:56:50.347558 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7k4d\" (UniqueName: \"kubernetes.io/projected/224a0072-ebea-4ed2-9b57-af3df41200c5-kube-api-access-k7k4d\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.347752 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-config\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.347854 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.449563 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7k4d\" (UniqueName: \"kubernetes.io/projected/224a0072-ebea-4ed2-9b57-af3df41200c5-kube-api-access-k7k4d\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.449676 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-config-data\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.449703 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8d47a18-0c9f-4126-8e93-5ba2544b1480-logs\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.449727 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-config\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.449758 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-config-data-custom\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.449792 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-combined-ca-bundle\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " 
pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.449823 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.449903 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7zrr\" (UniqueName: \"kubernetes.io/projected/f8d47a18-0c9f-4126-8e93-5ba2544b1480-kube-api-access-z7zrr\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.449993 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.450037 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.450067 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.451917 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.452994 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.453789 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.453905 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 
10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.454331 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-config\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.472915 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7k4d\" (UniqueName: \"kubernetes.io/projected/224a0072-ebea-4ed2-9b57-af3df41200c5-kube-api-access-k7k4d\") pod \"dnsmasq-dns-848cf88cfc-pd9p5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.552335 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-config-data\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.552399 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8d47a18-0c9f-4126-8e93-5ba2544b1480-logs\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.552597 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-config-data-custom\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.552674 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-combined-ca-bundle\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.552831 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7zrr\" (UniqueName: \"kubernetes.io/projected/f8d47a18-0c9f-4126-8e93-5ba2544b1480-kube-api-access-z7zrr\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.555069 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8d47a18-0c9f-4126-8e93-5ba2544b1480-logs\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.559628 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-combined-ca-bundle\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.560015 5016 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-config-data-custom\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.560527 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-config-data\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.577067 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7zrr\" (UniqueName: \"kubernetes.io/projected/f8d47a18-0c9f-4126-8e93-5ba2544b1480-kube-api-access-z7zrr\") pod \"barbican-api-7d75dc7f86-8kj4j\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.583633 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:50 crc kubenswrapper[5016]: I1211 10:56:50.684263 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:51 crc kubenswrapper[5016]: I1211 10:56:51.030642 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:51 crc kubenswrapper[5016]: I1211 10:56:51.030946 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:51 crc kubenswrapper[5016]: I1211 10:56:51.103681 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:51 crc kubenswrapper[5016]: I1211 10:56:51.104925 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:51 crc kubenswrapper[5016]: I1211 10:56:51.355780 5016 generic.go:334] "Generic (PLEG): container finished" podID="75099d8b-eafe-4f65-867b-877541873100" containerID="3c1d6fdc8a85e6dc92f96850e8bcffefe8c3c565d06b45c8cc194affe83a50d8" exitCode=0 Dec 11 10:56:51 crc kubenswrapper[5016]: I1211 10:56:51.355865 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" event={"ID":"75099d8b-eafe-4f65-867b-877541873100","Type":"ContainerDied","Data":"3c1d6fdc8a85e6dc92f96850e8bcffefe8c3c565d06b45c8cc194affe83a50d8"} Dec 11 10:56:51 crc kubenswrapper[5016]: I1211 10:56:51.356658 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:51 crc kubenswrapper[5016]: I1211 10:56:51.356686 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:52 crc kubenswrapper[5016]: I1211 10:56:52.366132 5016 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 10:56:52 crc kubenswrapper[5016]: I1211 10:56:52.366478 5016 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 10:56:52 crc kubenswrapper[5016]: I1211 10:56:52.469473 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" 
podUID="75099d8b-eafe-4f65-867b-877541873100" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.149:5353: connect: connection refused" Dec 11 10:56:52 crc kubenswrapper[5016]: I1211 10:56:52.905429 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-75d7945896-vvw5x"] Dec 11 10:56:52 crc kubenswrapper[5016]: I1211 10:56:52.907208 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:52 crc kubenswrapper[5016]: I1211 10:56:52.913473 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Dec 11 10:56:52 crc kubenswrapper[5016]: I1211 10:56:52.913611 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Dec 11 10:56:52 crc kubenswrapper[5016]: I1211 10:56:52.933881 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-75d7945896-vvw5x"] Dec 11 10:56:52 crc kubenswrapper[5016]: I1211 10:56:52.962876 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.007920 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-config-data-custom\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.008198 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-combined-ca-bundle\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.008404 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-internal-tls-certs\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.008465 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-public-tls-certs\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.008505 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-config-data\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.008548 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42eab2b8-1142-4d4f-bb8a-58736349fd7e-logs\") pod \"barbican-api-75d7945896-vvw5x\" (UID: 
\"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.008571 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4tsc\" (UniqueName: \"kubernetes.io/projected/42eab2b8-1142-4d4f-bb8a-58736349fd7e-kube-api-access-w4tsc\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.110294 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-public-tls-certs\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.110345 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-config-data\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.110368 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42eab2b8-1142-4d4f-bb8a-58736349fd7e-logs\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.110392 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4tsc\" (UniqueName: \"kubernetes.io/projected/42eab2b8-1142-4d4f-bb8a-58736349fd7e-kube-api-access-w4tsc\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.110451 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-config-data-custom\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.110506 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-combined-ca-bundle\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.110571 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-internal-tls-certs\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.111055 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42eab2b8-1142-4d4f-bb8a-58736349fd7e-logs\") pod \"barbican-api-75d7945896-vvw5x\" (UID: 
\"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.117893 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-internal-tls-certs\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.117968 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-config-data-custom\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.118331 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-public-tls-certs\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.119426 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-combined-ca-bundle\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.120430 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42eab2b8-1142-4d4f-bb8a-58736349fd7e-config-data\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.134732 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4tsc\" (UniqueName: \"kubernetes.io/projected/42eab2b8-1142-4d4f-bb8a-58736349fd7e-kube-api-access-w4tsc\") pod \"barbican-api-75d7945896-vvw5x\" (UID: \"42eab2b8-1142-4d4f-bb8a-58736349fd7e\") " pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.140496 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.227120 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.294182 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.298398 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.385509 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vmt79" event={"ID":"b4b58628-6bc5-4fab-b806-9c1f615c006c","Type":"ContainerDied","Data":"0c412c4a6b0588e77d2d09a790a5c6abbed4c393526292ed27ec1a606f6c324d"} Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.385568 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c412c4a6b0588e77d2d09a790a5c6abbed4c393526292ed27ec1a606f6c324d" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.385701 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vmt79" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.388221 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-ckqjx" event={"ID":"1091d9e0-69c8-499d-bf06-7aacc52d8ec6","Type":"ContainerDied","Data":"29ea3cc4b919e3d9b90553a34fd7fd1cc2fa2c26123de04e40223720488d38da"} Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.388276 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29ea3cc4b919e3d9b90553a34fd7fd1cc2fa2c26123de04e40223720488d38da" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.389980 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-ckqjx" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.418837 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znscv\" (UniqueName: \"kubernetes.io/projected/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-kube-api-access-znscv\") pod \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.419234 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-combined-ca-bundle\") pod \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.419296 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdjwd\" (UniqueName: \"kubernetes.io/projected/b4b58628-6bc5-4fab-b806-9c1f615c006c-kube-api-access-cdjwd\") pod \"b4b58628-6bc5-4fab-b806-9c1f615c006c\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.419332 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-credential-keys\") pod \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.419493 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-config-data\") pod \"b4b58628-6bc5-4fab-b806-9c1f615c006c\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.419537 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-fernet-keys\") pod 
\"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.419612 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-config-data\") pod \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.419643 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4b58628-6bc5-4fab-b806-9c1f615c006c-logs\") pod \"b4b58628-6bc5-4fab-b806-9c1f615c006c\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.419689 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-combined-ca-bundle\") pod \"b4b58628-6bc5-4fab-b806-9c1f615c006c\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.419713 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-scripts\") pod \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\" (UID: \"1091d9e0-69c8-499d-bf06-7aacc52d8ec6\") " Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.419780 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-scripts\") pod \"b4b58628-6bc5-4fab-b806-9c1f615c006c\" (UID: \"b4b58628-6bc5-4fab-b806-9c1f615c006c\") " Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.425488 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4b58628-6bc5-4fab-b806-9c1f615c006c-logs" (OuterVolumeSpecName: "logs") pod "b4b58628-6bc5-4fab-b806-9c1f615c006c" (UID: "b4b58628-6bc5-4fab-b806-9c1f615c006c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.429029 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-kube-api-access-znscv" (OuterVolumeSpecName: "kube-api-access-znscv") pod "1091d9e0-69c8-499d-bf06-7aacc52d8ec6" (UID: "1091d9e0-69c8-499d-bf06-7aacc52d8ec6"). InnerVolumeSpecName "kube-api-access-znscv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.429674 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "1091d9e0-69c8-499d-bf06-7aacc52d8ec6" (UID: "1091d9e0-69c8-499d-bf06-7aacc52d8ec6"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.433243 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-scripts" (OuterVolumeSpecName: "scripts") pod "b4b58628-6bc5-4fab-b806-9c1f615c006c" (UID: "b4b58628-6bc5-4fab-b806-9c1f615c006c"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.440091 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "1091d9e0-69c8-499d-bf06-7aacc52d8ec6" (UID: "1091d9e0-69c8-499d-bf06-7aacc52d8ec6"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.444054 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-scripts" (OuterVolumeSpecName: "scripts") pod "1091d9e0-69c8-499d-bf06-7aacc52d8ec6" (UID: "1091d9e0-69c8-499d-bf06-7aacc52d8ec6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.453335 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4b58628-6bc5-4fab-b806-9c1f615c006c-kube-api-access-cdjwd" (OuterVolumeSpecName: "kube-api-access-cdjwd") pod "b4b58628-6bc5-4fab-b806-9c1f615c006c" (UID: "b4b58628-6bc5-4fab-b806-9c1f615c006c"). InnerVolumeSpecName "kube-api-access-cdjwd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.471095 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1091d9e0-69c8-499d-bf06-7aacc52d8ec6" (UID: "1091d9e0-69c8-499d-bf06-7aacc52d8ec6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.495031 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-config-data" (OuterVolumeSpecName: "config-data") pod "1091d9e0-69c8-499d-bf06-7aacc52d8ec6" (UID: "1091d9e0-69c8-499d-bf06-7aacc52d8ec6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.521920 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdjwd\" (UniqueName: \"kubernetes.io/projected/b4b58628-6bc5-4fab-b806-9c1f615c006c-kube-api-access-cdjwd\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.521967 5016 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.521977 5016 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.521988 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.521997 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4b58628-6bc5-4fab-b806-9c1f615c006c-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.522006 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.522014 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.522023 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znscv\" (UniqueName: \"kubernetes.io/projected/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-kube-api-access-znscv\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.522033 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1091d9e0-69c8-499d-bf06-7aacc52d8ec6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.551329 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4b58628-6bc5-4fab-b806-9c1f615c006c" (UID: "b4b58628-6bc5-4fab-b806-9c1f615c006c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.574110 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-config-data" (OuterVolumeSpecName: "config-data") pod "b4b58628-6bc5-4fab-b806-9c1f615c006c" (UID: "b4b58628-6bc5-4fab-b806-9c1f615c006c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.624025 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.624398 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b58628-6bc5-4fab-b806-9c1f615c006c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:53 crc kubenswrapper[5016]: I1211 10:56:53.932897 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.036902 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-ovsdbserver-sb\") pod \"75099d8b-eafe-4f65-867b-877541873100\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.037439 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-ovsdbserver-nb\") pod \"75099d8b-eafe-4f65-867b-877541873100\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.037467 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-dns-svc\") pod \"75099d8b-eafe-4f65-867b-877541873100\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.039142 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-dns-swift-storage-0\") pod \"75099d8b-eafe-4f65-867b-877541873100\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.039345 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-config\") pod \"75099d8b-eafe-4f65-867b-877541873100\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.039406 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2nvd\" (UniqueName: \"kubernetes.io/projected/75099d8b-eafe-4f65-867b-877541873100-kube-api-access-p2nvd\") pod \"75099d8b-eafe-4f65-867b-877541873100\" (UID: \"75099d8b-eafe-4f65-867b-877541873100\") " Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.054511 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75099d8b-eafe-4f65-867b-877541873100-kube-api-access-p2nvd" (OuterVolumeSpecName: "kube-api-access-p2nvd") pod "75099d8b-eafe-4f65-867b-877541873100" (UID: "75099d8b-eafe-4f65-867b-877541873100"). InnerVolumeSpecName "kube-api-access-p2nvd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.131663 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "75099d8b-eafe-4f65-867b-877541873100" (UID: "75099d8b-eafe-4f65-867b-877541873100"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.141867 5016 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.141918 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2nvd\" (UniqueName: \"kubernetes.io/projected/75099d8b-eafe-4f65-867b-877541873100-kube-api-access-p2nvd\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.146501 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "75099d8b-eafe-4f65-867b-877541873100" (UID: "75099d8b-eafe-4f65-867b-877541873100"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.147611 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "75099d8b-eafe-4f65-867b-877541873100" (UID: "75099d8b-eafe-4f65-867b-877541873100"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.178092 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "75099d8b-eafe-4f65-867b-877541873100" (UID: "75099d8b-eafe-4f65-867b-877541873100"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.188813 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-config" (OuterVolumeSpecName: "config") pod "75099d8b-eafe-4f65-867b-877541873100" (UID: "75099d8b-eafe-4f65-867b-877541873100"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.194103 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7d75dc7f86-8kj4j"] Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.247983 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.248332 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.248370 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.248379 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75099d8b-eafe-4f65-867b-877541873100-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.406314 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5f9f9b6559-f78rz"] Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.413918 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" event={"ID":"75099d8b-eafe-4f65-867b-877541873100","Type":"ContainerDied","Data":"122c5af2c647aabdbad104c11af20d0eb384925785cbb3e8a98cfc353cb6218d"} Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.413984 5016 scope.go:117] "RemoveContainer" containerID="3c1d6fdc8a85e6dc92f96850e8bcffefe8c3c565d06b45c8cc194affe83a50d8" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.414121 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-g7htz" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.434443 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7d75dc7f86-8kj4j" event={"ID":"f8d47a18-0c9f-4126-8e93-5ba2544b1480","Type":"ContainerStarted","Data":"da4ecaa3843ba06ff00ea6d7e8a20a3106b88d149077d74feacf469753aff6ea"} Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.439033 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-559df4c4fd-rpdct"] Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.485969 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-pd9p5"] Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.549068 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-676fd6784-tg4g7"] Dec 11 10:56:54 crc kubenswrapper[5016]: E1211 10:56:54.549610 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75099d8b-eafe-4f65-867b-877541873100" containerName="dnsmasq-dns" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.549629 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="75099d8b-eafe-4f65-867b-877541873100" containerName="dnsmasq-dns" Dec 11 10:56:54 crc kubenswrapper[5016]: E1211 10:56:54.549641 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1091d9e0-69c8-499d-bf06-7aacc52d8ec6" containerName="keystone-bootstrap" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.549649 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1091d9e0-69c8-499d-bf06-7aacc52d8ec6" containerName="keystone-bootstrap" Dec 11 10:56:54 crc kubenswrapper[5016]: E1211 10:56:54.549675 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75099d8b-eafe-4f65-867b-877541873100" containerName="init" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.549683 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="75099d8b-eafe-4f65-867b-877541873100" containerName="init" Dec 11 10:56:54 crc kubenswrapper[5016]: E1211 10:56:54.549695 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4b58628-6bc5-4fab-b806-9c1f615c006c" containerName="placement-db-sync" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.549702 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4b58628-6bc5-4fab-b806-9c1f615c006c" containerName="placement-db-sync" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.549925 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1091d9e0-69c8-499d-bf06-7aacc52d8ec6" containerName="keystone-bootstrap" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.549996 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4b58628-6bc5-4fab-b806-9c1f615c006c" containerName="placement-db-sync" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.550053 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="75099d8b-eafe-4f65-867b-877541873100" containerName="dnsmasq-dns" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.551469 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.556300 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.558868 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.559315 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.559643 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-8k4lc" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.559892 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.567448 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-676fd6784-tg4g7"] Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.587507 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-589444b9f8-c7wwh"] Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.591991 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.596612 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.596837 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.597471 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.597687 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.597809 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.598033 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-g8dhv" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.607998 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-589444b9f8-c7wwh"] Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.618120 5016 scope.go:117] "RemoveContainer" containerID="73c28a2a270e0fb07059ba6c76b150e10a472dad21103d5220aa9bb4f862c17d" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.638498 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-75d7945896-vvw5x"] Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.669790 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-g7htz"] Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.670500 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-credential-keys\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.670564 5016 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-public-tls-certs\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.670602 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-internal-tls-certs\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.670664 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-combined-ca-bundle\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.670732 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-scripts\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.670772 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bm2gn\" (UniqueName: \"kubernetes.io/projected/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-kube-api-access-bm2gn\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.670815 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/668906e8-a695-43ee-aca4-5b1bd13053eb-logs\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.670841 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-fernet-keys\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.670871 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-scripts\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.670904 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmcw2\" (UniqueName: \"kubernetes.io/projected/668906e8-a695-43ee-aca4-5b1bd13053eb-kube-api-access-jmcw2\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc 
kubenswrapper[5016]: I1211 10:56:54.670930 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-public-tls-certs\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.671306 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-combined-ca-bundle\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.671421 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-config-data\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.671684 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-internal-tls-certs\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.671742 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-config-data\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.678230 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.678348 5016 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.682232 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.684735 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-g7htz"] Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.696925 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5d4bc555dc-hjmj8"] Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.781639 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-credential-keys\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.781716 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-public-tls-certs\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " 
pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.781773 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-internal-tls-certs\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.781865 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-combined-ca-bundle\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.781927 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-scripts\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.781989 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bm2gn\" (UniqueName: \"kubernetes.io/projected/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-kube-api-access-bm2gn\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.782048 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/668906e8-a695-43ee-aca4-5b1bd13053eb-logs\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.782085 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-fernet-keys\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.782125 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-scripts\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.782179 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmcw2\" (UniqueName: \"kubernetes.io/projected/668906e8-a695-43ee-aca4-5b1bd13053eb-kube-api-access-jmcw2\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.782217 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-public-tls-certs\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.782310 5016 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-combined-ca-bundle\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.782401 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-config-data\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.782450 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-internal-tls-certs\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.782488 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-config-data\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.790788 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-public-tls-certs\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.790900 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-credential-keys\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.793313 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/668906e8-a695-43ee-aca4-5b1bd13053eb-logs\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.795369 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-public-tls-certs\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.802559 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-internal-tls-certs\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.810488 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-internal-tls-certs\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.816200 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-combined-ca-bundle\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.818700 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-scripts\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.819230 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-scripts\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.823920 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/668906e8-a695-43ee-aca4-5b1bd13053eb-config-data\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.834357 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-combined-ca-bundle\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.848869 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm2gn\" (UniqueName: \"kubernetes.io/projected/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-kube-api-access-bm2gn\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.851103 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-config-data\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.851473 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/207fc475-2260-4b2f-86a9-c4c0bedf3ce1-fernet-keys\") pod \"keystone-589444b9f8-c7wwh\" (UID: \"207fc475-2260-4b2f-86a9-c4c0bedf3ce1\") " pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.867637 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmcw2\" (UniqueName: \"kubernetes.io/projected/668906e8-a695-43ee-aca4-5b1bd13053eb-kube-api-access-jmcw2\") pod \"placement-676fd6784-tg4g7\" (UID: \"668906e8-a695-43ee-aca4-5b1bd13053eb\") " 
pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.913852 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:54 crc kubenswrapper[5016]: I1211 10:56:54.959921 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:55 crc kubenswrapper[5016]: I1211 10:56:55.511749 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75099d8b-eafe-4f65-867b-877541873100" path="/var/lib/kubelet/pods/75099d8b-eafe-4f65-867b-877541873100/volumes" Dec 11 10:56:55 crc kubenswrapper[5016]: I1211 10:56:55.528273 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-75d7945896-vvw5x" event={"ID":"42eab2b8-1142-4d4f-bb8a-58736349fd7e","Type":"ContainerStarted","Data":"5052d2b96e3f5b965d4565655146502c964715781074d3b82bb430ac3e14dc7b"} Dec 11 10:56:55 crc kubenswrapper[5016]: I1211 10:56:55.529462 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" event={"ID":"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d","Type":"ContainerStarted","Data":"2caa4a45d775c1fed310a9fd80b4f58fc44baf25eaaae21bf403260444cf61d5"} Dec 11 10:56:55 crc kubenswrapper[5016]: I1211 10:56:55.530375 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5d4bc555dc-hjmj8" event={"ID":"a420329b-5657-402b-8b2c-c6f53beda0d6","Type":"ContainerStarted","Data":"30393e0409a06b547ce1648addf858da6dedfb70ecdacb344073f2d063f3930d"} Dec 11 10:56:55 crc kubenswrapper[5016]: I1211 10:56:55.538400 5016 generic.go:334] "Generic (PLEG): container finished" podID="224a0072-ebea-4ed2-9b57-af3df41200c5" containerID="ff8ae82f6bbc99792abf528b71021bb7957e55871e76f100c7a660d288f2d041" exitCode=0 Dec 11 10:56:55 crc kubenswrapper[5016]: I1211 10:56:55.538487 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" event={"ID":"224a0072-ebea-4ed2-9b57-af3df41200c5","Type":"ContainerDied","Data":"ff8ae82f6bbc99792abf528b71021bb7957e55871e76f100c7a660d288f2d041"} Dec 11 10:56:55 crc kubenswrapper[5016]: I1211 10:56:55.538522 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" event={"ID":"224a0072-ebea-4ed2-9b57-af3df41200c5","Type":"ContainerStarted","Data":"d8b134364b1c2f65ce4dff435a683263fecad45c20df1dc1e4019c6ca9f7561c"} Dec 11 10:56:55 crc kubenswrapper[5016]: I1211 10:56:55.545192 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7d75dc7f86-8kj4j" event={"ID":"f8d47a18-0c9f-4126-8e93-5ba2544b1480","Type":"ContainerStarted","Data":"448d139089718b479bb8db1d152c382059860a37c2adedbff50cfd0dc385c2b4"} Dec 11 10:56:55 crc kubenswrapper[5016]: I1211 10:56:55.557424 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5f9f9b6559-f78rz" event={"ID":"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0","Type":"ContainerStarted","Data":"10092ce9f40fcf34bade3de39a5446ba3ee10020951571e3c5b0152c0fa38472"} Dec 11 10:56:55 crc kubenswrapper[5016]: I1211 10:56:55.833309 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-589444b9f8-c7wwh"] Dec 11 10:56:55 crc kubenswrapper[5016]: W1211 10:56:55.847509 5016 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod207fc475_2260_4b2f_86a9_c4c0bedf3ce1.slice/crio-cb23512d6cd1479af734c7f31637bd436d6ddb759d2215e46e17ac6b5d481e87 WatchSource:0}: Error finding container cb23512d6cd1479af734c7f31637bd436d6ddb759d2215e46e17ac6b5d481e87: Status 404 returned error can't find the container with id cb23512d6cd1479af734c7f31637bd436d6ddb759d2215e46e17ac6b5d481e87 Dec 11 10:56:55 crc kubenswrapper[5016]: I1211 10:56:55.930562 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-676fd6784-tg4g7"] Dec 11 10:56:55 crc kubenswrapper[5016]: W1211 10:56:55.966752 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod668906e8_a695_43ee_aca4_5b1bd13053eb.slice/crio-7ba79c0fdf307ad74889724b1a865d745f2548f0689c94f299d0956197ed4fdd WatchSource:0}: Error finding container 7ba79c0fdf307ad74889724b1a865d745f2548f0689c94f299d0956197ed4fdd: Status 404 returned error can't find the container with id 7ba79c0fdf307ad74889724b1a865d745f2548f0689c94f299d0956197ed4fdd Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.611636 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49507f32-2b67-4dc4-a968-a691ca6c8454","Type":"ContainerStarted","Data":"da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c"} Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.621986 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-589444b9f8-c7wwh" event={"ID":"207fc475-2260-4b2f-86a9-c4c0bedf3ce1","Type":"ContainerStarted","Data":"aa507b3ec59ead8668f87b359d48dad995b4082aced7fd5dbdd4995797ce1fb4"} Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.622115 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-589444b9f8-c7wwh" event={"ID":"207fc475-2260-4b2f-86a9-c4c0bedf3ce1","Type":"ContainerStarted","Data":"cb23512d6cd1479af734c7f31637bd436d6ddb759d2215e46e17ac6b5d481e87"} Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.623091 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.638399 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-676fd6784-tg4g7" event={"ID":"668906e8-a695-43ee-aca4-5b1bd13053eb","Type":"ContainerStarted","Data":"3064308502b24a1fa8c0a3fe71fe58cf7aff02ee96da16525a3eb1933a713077"} Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.638459 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-676fd6784-tg4g7" event={"ID":"668906e8-a695-43ee-aca4-5b1bd13053eb","Type":"ContainerStarted","Data":"7ba79c0fdf307ad74889724b1a865d745f2548f0689c94f299d0956197ed4fdd"} Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.649949 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-75d7945896-vvw5x" event={"ID":"42eab2b8-1142-4d4f-bb8a-58736349fd7e","Type":"ContainerStarted","Data":"76b5d00401552754cedf7a69dda0f67347a90d61f1912cb9c138c8ede2d5ec2c"} Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.650007 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-75d7945896-vvw5x" event={"ID":"42eab2b8-1142-4d4f-bb8a-58736349fd7e","Type":"ContainerStarted","Data":"c2b53b7c8f7073601d11e82ed4aa802033e683ad4b8d37021cfcda51d0aba1b3"} Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.650807 5016 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.650828 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.661341 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5d4bc555dc-hjmj8" event={"ID":"a420329b-5657-402b-8b2c-c6f53beda0d6","Type":"ContainerStarted","Data":"aa508d0867fdbc5e2dbc93e2b576a82f7ef7d1570776a395c8cbbfe8442344c8"} Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.661381 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5d4bc555dc-hjmj8" event={"ID":"a420329b-5657-402b-8b2c-c6f53beda0d6","Type":"ContainerStarted","Data":"3e97871a3147c3b0060f8365f060db0c69151cedaf8f40c11739a979fab15a32"} Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.662197 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.677802 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" event={"ID":"224a0072-ebea-4ed2-9b57-af3df41200c5","Type":"ContainerStarted","Data":"57b7ad75283d8bb53aaf969f78ec56f27beec9e99e52aaf26377aac252875b93"} Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.677953 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.680152 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-589444b9f8-c7wwh" podStartSLOduration=2.680126289 podStartE2EDuration="2.680126289s" podCreationTimestamp="2025-12-11 10:56:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:56.66870091 +0000 UTC m=+1333.487260509" watchObservedRunningTime="2025-12-11 10:56:56.680126289 +0000 UTC m=+1333.498685878" Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.684659 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7d75dc7f86-8kj4j" event={"ID":"f8d47a18-0c9f-4126-8e93-5ba2544b1480","Type":"ContainerStarted","Data":"cfd1756a2ff29b932665723464a5aa5ae38cf3f0ba2265742f8854f7df2b68fa"} Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.685171 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.685336 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.704850 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5d4bc555dc-hjmj8" podStartSLOduration=11.704830233 podStartE2EDuration="11.704830233s" podCreationTimestamp="2025-12-11 10:56:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:56.695854574 +0000 UTC m=+1333.514414143" watchObservedRunningTime="2025-12-11 10:56:56.704830233 +0000 UTC m=+1333.523389812" Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.722439 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-75d7945896-vvw5x" 
podStartSLOduration=4.722420623 podStartE2EDuration="4.722420623s" podCreationTimestamp="2025-12-11 10:56:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:56.717988995 +0000 UTC m=+1333.536548584" watchObservedRunningTime="2025-12-11 10:56:56.722420623 +0000 UTC m=+1333.540980202" Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.774208 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" podStartSLOduration=6.774173039 podStartE2EDuration="6.774173039s" podCreationTimestamp="2025-12-11 10:56:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:56.753665497 +0000 UTC m=+1333.572225086" watchObservedRunningTime="2025-12-11 10:56:56.774173039 +0000 UTC m=+1333.592732618" Dec 11 10:56:56 crc kubenswrapper[5016]: I1211 10:56:56.809689 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7d75dc7f86-8kj4j" podStartSLOduration=6.809662567 podStartE2EDuration="6.809662567s" podCreationTimestamp="2025-12-11 10:56:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:56.788706024 +0000 UTC m=+1333.607265623" watchObservedRunningTime="2025-12-11 10:56:56.809662567 +0000 UTC m=+1333.628222156" Dec 11 10:56:57 crc kubenswrapper[5016]: I1211 10:56:57.697366 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-676fd6784-tg4g7" event={"ID":"668906e8-a695-43ee-aca4-5b1bd13053eb","Type":"ContainerStarted","Data":"4c889598d8f57ca0d712f29e3b3cfda9d9b90edcf37a1d52033931086e05669d"} Dec 11 10:56:57 crc kubenswrapper[5016]: I1211 10:56:57.727268 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-676fd6784-tg4g7" podStartSLOduration=3.7272484280000002 podStartE2EDuration="3.727248428s" podCreationTimestamp="2025-12-11 10:56:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:56:57.719838506 +0000 UTC m=+1334.538398095" watchObservedRunningTime="2025-12-11 10:56:57.727248428 +0000 UTC m=+1334.545808007" Dec 11 10:56:58 crc kubenswrapper[5016]: I1211 10:56:58.321866 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-78bccb96bd-btt5f" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Dec 11 10:56:58 crc kubenswrapper[5016]: I1211 10:56:58.447484 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7df5fc4844-wdnrz" podUID="02741cc6-3a2a-48c1-b492-57762e0d75e6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Dec 11 10:56:58 crc kubenswrapper[5016]: I1211 10:56:58.712838 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5f9f9b6559-f78rz" event={"ID":"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0","Type":"ContainerStarted","Data":"2db71fde789f9025c3a7d7491c41ad652f24cec46c6820d905c89020ec5f259d"} Dec 11 10:56:58 crc 
kubenswrapper[5016]: I1211 10:56:58.716093 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" event={"ID":"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d","Type":"ContainerStarted","Data":"35546735a670b0d181cbc8b7fd0ac36cd19c710be4cdb1105c575d4951de4bfe"} Dec 11 10:56:58 crc kubenswrapper[5016]: I1211 10:56:58.716583 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:58 crc kubenswrapper[5016]: I1211 10:56:58.717467 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:56:59 crc kubenswrapper[5016]: I1211 10:56:59.728865 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" event={"ID":"2ad5059d-bfd5-4ea8-8d6a-898cd592e49d","Type":"ContainerStarted","Data":"8e1c6e6e1361219ca309e476d2ed8b20e5509b88542354501a0cf43163347058"} Dec 11 10:56:59 crc kubenswrapper[5016]: I1211 10:56:59.730714 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dgkjf" event={"ID":"d31ff49c-2515-4b93-b3b8-e776e3190ab7","Type":"ContainerStarted","Data":"12cce264a30a2a12ec5a15c1064e8eec542a78f676eef42f65b94a06a644a7d9"} Dec 11 10:56:59 crc kubenswrapper[5016]: I1211 10:56:59.735496 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5f9f9b6559-f78rz" event={"ID":"266c2ca6-fea6-4f3d-8796-bd0db83f2bf0","Type":"ContainerStarted","Data":"a7acec1b3c3eadf5253be3a9bc3b0dcc269a07ef4b7050d5c14e3f42179a30c7"} Dec 11 10:56:59 crc kubenswrapper[5016]: I1211 10:56:59.762763 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-559df4c4fd-rpdct" podStartSLOduration=7.031164048 podStartE2EDuration="10.762735647s" podCreationTimestamp="2025-12-11 10:56:49 +0000 UTC" firstStartedPulling="2025-12-11 10:56:54.485164929 +0000 UTC m=+1331.303724518" lastFinishedPulling="2025-12-11 10:56:58.216736538 +0000 UTC m=+1335.035296117" observedRunningTime="2025-12-11 10:56:59.751536093 +0000 UTC m=+1336.570095672" watchObservedRunningTime="2025-12-11 10:56:59.762735647 +0000 UTC m=+1336.581295236" Dec 11 10:56:59 crc kubenswrapper[5016]: I1211 10:56:59.779024 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-5f9f9b6559-f78rz" podStartSLOduration=7.001324708 podStartE2EDuration="10.779007555s" podCreationTimestamp="2025-12-11 10:56:49 +0000 UTC" firstStartedPulling="2025-12-11 10:56:54.439687987 +0000 UTC m=+1331.258247566" lastFinishedPulling="2025-12-11 10:56:58.217370834 +0000 UTC m=+1335.035930413" observedRunningTime="2025-12-11 10:56:59.773710246 +0000 UTC m=+1336.592269855" watchObservedRunningTime="2025-12-11 10:56:59.779007555 +0000 UTC m=+1336.597567134" Dec 11 10:56:59 crc kubenswrapper[5016]: I1211 10:56:59.799927 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-dgkjf" podStartSLOduration=3.490938362 podStartE2EDuration="51.799909567s" podCreationTimestamp="2025-12-11 10:56:08 +0000 UTC" firstStartedPulling="2025-12-11 10:56:09.914689213 +0000 UTC m=+1286.733248782" lastFinishedPulling="2025-12-11 10:56:58.223660408 +0000 UTC m=+1335.042219987" observedRunningTime="2025-12-11 10:56:59.790528747 +0000 UTC m=+1336.609088326" watchObservedRunningTime="2025-12-11 10:56:59.799909567 +0000 UTC m=+1336.618469146" Dec 11 10:57:00 crc kubenswrapper[5016]: I1211 
10:57:00.603951 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:57:00 crc kubenswrapper[5016]: I1211 10:57:00.679899 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-42gmc"] Dec 11 10:57:00 crc kubenswrapper[5016]: I1211 10:57:00.680216 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" podUID="22451284-6148-4113-a7f5-7c7009092dbe" containerName="dnsmasq-dns" containerID="cri-o://c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249" gracePeriod=10 Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.489223 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.640228 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-ovsdbserver-nb\") pod \"22451284-6148-4113-a7f5-7c7009092dbe\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.640352 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-dns-svc\") pod \"22451284-6148-4113-a7f5-7c7009092dbe\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.640402 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-dns-swift-storage-0\") pod \"22451284-6148-4113-a7f5-7c7009092dbe\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.640505 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-ovsdbserver-sb\") pod \"22451284-6148-4113-a7f5-7c7009092dbe\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.640547 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbrjl\" (UniqueName: \"kubernetes.io/projected/22451284-6148-4113-a7f5-7c7009092dbe-kube-api-access-gbrjl\") pod \"22451284-6148-4113-a7f5-7c7009092dbe\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.640591 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-config\") pod \"22451284-6148-4113-a7f5-7c7009092dbe\" (UID: \"22451284-6148-4113-a7f5-7c7009092dbe\") " Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.663097 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22451284-6148-4113-a7f5-7c7009092dbe-kube-api-access-gbrjl" (OuterVolumeSpecName: "kube-api-access-gbrjl") pod "22451284-6148-4113-a7f5-7c7009092dbe" (UID: "22451284-6148-4113-a7f5-7c7009092dbe"). InnerVolumeSpecName "kube-api-access-gbrjl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.712823 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "22451284-6148-4113-a7f5-7c7009092dbe" (UID: "22451284-6148-4113-a7f5-7c7009092dbe"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.721156 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "22451284-6148-4113-a7f5-7c7009092dbe" (UID: "22451284-6148-4113-a7f5-7c7009092dbe"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.733620 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-config" (OuterVolumeSpecName: "config") pod "22451284-6148-4113-a7f5-7c7009092dbe" (UID: "22451284-6148-4113-a7f5-7c7009092dbe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.734183 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "22451284-6148-4113-a7f5-7c7009092dbe" (UID: "22451284-6148-4113-a7f5-7c7009092dbe"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.745557 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.745606 5016 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.745619 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.745632 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbrjl\" (UniqueName: \"kubernetes.io/projected/22451284-6148-4113-a7f5-7c7009092dbe-kube-api-access-gbrjl\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.745649 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.747015 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "22451284-6148-4113-a7f5-7c7009092dbe" (UID: "22451284-6148-4113-a7f5-7c7009092dbe"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.774547 5016 generic.go:334] "Generic (PLEG): container finished" podID="22451284-6148-4113-a7f5-7c7009092dbe" containerID="c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249" exitCode=0 Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.774647 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" event={"ID":"22451284-6148-4113-a7f5-7c7009092dbe","Type":"ContainerDied","Data":"c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249"} Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.774983 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" event={"ID":"22451284-6148-4113-a7f5-7c7009092dbe","Type":"ContainerDied","Data":"e80d8cedd7ec2cbc886616b8a63ef190cf3a108b37f660075ce85424b3181506"} Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.775023 5016 scope.go:117] "RemoveContainer" containerID="c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.774679 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-42gmc" Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.835369 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-42gmc"] Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.846389 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-42gmc"] Dec 11 10:57:01 crc kubenswrapper[5016]: I1211 10:57:01.847146 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22451284-6148-4113-a7f5-7c7009092dbe-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:03 crc kubenswrapper[5016]: I1211 10:57:03.045887 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:57:03 crc kubenswrapper[5016]: I1211 10:57:03.319923 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:57:03 crc kubenswrapper[5016]: I1211 10:57:03.496242 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22451284-6148-4113-a7f5-7c7009092dbe" path="/var/lib/kubelet/pods/22451284-6148-4113-a7f5-7c7009092dbe/volumes" Dec 11 10:57:05 crc kubenswrapper[5016]: I1211 10:57:05.059292 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:57:05 crc kubenswrapper[5016]: I1211 10:57:05.277928 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-75d7945896-vvw5x" Dec 11 10:57:05 crc kubenswrapper[5016]: I1211 10:57:05.356369 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7d75dc7f86-8kj4j"] Dec 11 10:57:05 crc kubenswrapper[5016]: I1211 10:57:05.356598 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7d75dc7f86-8kj4j" podUID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" containerName="barbican-api-log" containerID="cri-o://448d139089718b479bb8db1d152c382059860a37c2adedbff50cfd0dc385c2b4" gracePeriod=30 Dec 11 10:57:05 crc kubenswrapper[5016]: I1211 10:57:05.356902 5016 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/barbican-api-7d75dc7f86-8kj4j" podUID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" containerName="barbican-api" containerID="cri-o://cfd1756a2ff29b932665723464a5aa5ae38cf3f0ba2265742f8854f7df2b68fa" gracePeriod=30 Dec 11 10:57:05 crc kubenswrapper[5016]: I1211 10:57:05.839414 5016 generic.go:334] "Generic (PLEG): container finished" podID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" containerID="448d139089718b479bb8db1d152c382059860a37c2adedbff50cfd0dc385c2b4" exitCode=143 Dec 11 10:57:05 crc kubenswrapper[5016]: I1211 10:57:05.839838 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7d75dc7f86-8kj4j" event={"ID":"f8d47a18-0c9f-4126-8e93-5ba2544b1480","Type":"ContainerDied","Data":"448d139089718b479bb8db1d152c382059860a37c2adedbff50cfd0dc385c2b4"} Dec 11 10:57:06 crc kubenswrapper[5016]: I1211 10:57:06.855660 5016 generic.go:334] "Generic (PLEG): container finished" podID="d31ff49c-2515-4b93-b3b8-e776e3190ab7" containerID="12cce264a30a2a12ec5a15c1064e8eec542a78f676eef42f65b94a06a644a7d9" exitCode=0 Dec 11 10:57:06 crc kubenswrapper[5016]: I1211 10:57:06.856034 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dgkjf" event={"ID":"d31ff49c-2515-4b93-b3b8-e776e3190ab7","Type":"ContainerDied","Data":"12cce264a30a2a12ec5a15c1064e8eec542a78f676eef42f65b94a06a644a7d9"} Dec 11 10:57:08 crc kubenswrapper[5016]: I1211 10:57:08.320418 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-78bccb96bd-btt5f" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Dec 11 10:57:08 crc kubenswrapper[5016]: I1211 10:57:08.444249 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7df5fc4844-wdnrz" podUID="02741cc6-3a2a-48c1-b492-57762e0d75e6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Dec 11 10:57:08 crc kubenswrapper[5016]: I1211 10:57:08.511753 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7d75dc7f86-8kj4j" podUID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.155:9311/healthcheck\": read tcp 10.217.0.2:44170->10.217.0.155:9311: read: connection reset by peer" Dec 11 10:57:08 crc kubenswrapper[5016]: I1211 10:57:08.511808 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7d75dc7f86-8kj4j" podUID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.155:9311/healthcheck\": read tcp 10.217.0.2:44162->10.217.0.155:9311: read: connection reset by peer" Dec 11 10:57:09 crc kubenswrapper[5016]: I1211 10:57:09.884484 5016 generic.go:334] "Generic (PLEG): container finished" podID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" containerID="cfd1756a2ff29b932665723464a5aa5ae38cf3f0ba2265742f8854f7df2b68fa" exitCode=0 Dec 11 10:57:09 crc kubenswrapper[5016]: I1211 10:57:09.884575 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7d75dc7f86-8kj4j" event={"ID":"f8d47a18-0c9f-4126-8e93-5ba2544b1480","Type":"ContainerDied","Data":"cfd1756a2ff29b932665723464a5aa5ae38cf3f0ba2265742f8854f7df2b68fa"} Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 
10:57:10.041229 5016 scope.go:117] "RemoveContainer" containerID="6f639f5c0e6d4f6573048450c5dc2c37d9568e3a0b53d57de3129f76c042ffd7" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.170994 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.348074 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-db-sync-config-data\") pod \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.348607 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d31ff49c-2515-4b93-b3b8-e776e3190ab7-etc-machine-id\") pod \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.348656 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-scripts\") pod \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.348705 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-combined-ca-bundle\") pod \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.348752 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bdh2\" (UniqueName: \"kubernetes.io/projected/d31ff49c-2515-4b93-b3b8-e776e3190ab7-kube-api-access-6bdh2\") pod \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.348774 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-config-data\") pod \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\" (UID: \"d31ff49c-2515-4b93-b3b8-e776e3190ab7\") " Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.348786 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d31ff49c-2515-4b93-b3b8-e776e3190ab7-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d31ff49c-2515-4b93-b3b8-e776e3190ab7" (UID: "d31ff49c-2515-4b93-b3b8-e776e3190ab7"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.349084 5016 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d31ff49c-2515-4b93-b3b8-e776e3190ab7-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.358090 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "d31ff49c-2515-4b93-b3b8-e776e3190ab7" (UID: "d31ff49c-2515-4b93-b3b8-e776e3190ab7"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.359337 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d31ff49c-2515-4b93-b3b8-e776e3190ab7-kube-api-access-6bdh2" (OuterVolumeSpecName: "kube-api-access-6bdh2") pod "d31ff49c-2515-4b93-b3b8-e776e3190ab7" (UID: "d31ff49c-2515-4b93-b3b8-e776e3190ab7"). InnerVolumeSpecName "kube-api-access-6bdh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.369799 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-scripts" (OuterVolumeSpecName: "scripts") pod "d31ff49c-2515-4b93-b3b8-e776e3190ab7" (UID: "d31ff49c-2515-4b93-b3b8-e776e3190ab7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.386051 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d31ff49c-2515-4b93-b3b8-e776e3190ab7" (UID: "d31ff49c-2515-4b93-b3b8-e776e3190ab7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.419996 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-config-data" (OuterVolumeSpecName: "config-data") pod "d31ff49c-2515-4b93-b3b8-e776e3190ab7" (UID: "d31ff49c-2515-4b93-b3b8-e776e3190ab7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.451386 5016 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.451423 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.451437 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.451447 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bdh2\" (UniqueName: \"kubernetes.io/projected/d31ff49c-2515-4b93-b3b8-e776e3190ab7-kube-api-access-6bdh2\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.451458 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d31ff49c-2515-4b93-b3b8-e776e3190ab7-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.650036 5016 scope.go:117] "RemoveContainer" containerID="c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249" Dec 11 10:57:10 crc kubenswrapper[5016]: E1211 10:57:10.650680 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249\": container with ID starting with c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249 not found: ID does not exist" containerID="c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.650728 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249"} err="failed to get container status \"c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249\": rpc error: code = NotFound desc = could not find container \"c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249\": container with ID starting with c9a26d275b898c601e2740d92e507943aa1ea274f5f0639d28631297dedd5249 not found: ID does not exist" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.650751 5016 scope.go:117] "RemoveContainer" containerID="6f639f5c0e6d4f6573048450c5dc2c37d9568e3a0b53d57de3129f76c042ffd7" Dec 11 10:57:10 crc kubenswrapper[5016]: E1211 10:57:10.651498 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f639f5c0e6d4f6573048450c5dc2c37d9568e3a0b53d57de3129f76c042ffd7\": container with ID starting with 6f639f5c0e6d4f6573048450c5dc2c37d9568e3a0b53d57de3129f76c042ffd7 not found: ID does not exist" containerID="6f639f5c0e6d4f6573048450c5dc2c37d9568e3a0b53d57de3129f76c042ffd7" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.651541 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f639f5c0e6d4f6573048450c5dc2c37d9568e3a0b53d57de3129f76c042ffd7"} err="failed to get container status \"6f639f5c0e6d4f6573048450c5dc2c37d9568e3a0b53d57de3129f76c042ffd7\": rpc error: code = NotFound desc = could not find container \"6f639f5c0e6d4f6573048450c5dc2c37d9568e3a0b53d57de3129f76c042ffd7\": container with ID starting with 6f639f5c0e6d4f6573048450c5dc2c37d9568e3a0b53d57de3129f76c042ffd7 not found: ID does not exist" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.652534 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.759725 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-combined-ca-bundle\") pod \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.759768 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-config-data-custom\") pod \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.759905 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-config-data\") pod \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.759947 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8d47a18-0c9f-4126-8e93-5ba2544b1480-logs\") pod \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.760063 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7zrr\" (UniqueName: \"kubernetes.io/projected/f8d47a18-0c9f-4126-8e93-5ba2544b1480-kube-api-access-z7zrr\") pod \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\" (UID: \"f8d47a18-0c9f-4126-8e93-5ba2544b1480\") " Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.761268 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8d47a18-0c9f-4126-8e93-5ba2544b1480-logs" (OuterVolumeSpecName: "logs") pod "f8d47a18-0c9f-4126-8e93-5ba2544b1480" (UID: "f8d47a18-0c9f-4126-8e93-5ba2544b1480"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.763731 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f8d47a18-0c9f-4126-8e93-5ba2544b1480" (UID: "f8d47a18-0c9f-4126-8e93-5ba2544b1480"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.765059 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8d47a18-0c9f-4126-8e93-5ba2544b1480-kube-api-access-z7zrr" (OuterVolumeSpecName: "kube-api-access-z7zrr") pod "f8d47a18-0c9f-4126-8e93-5ba2544b1480" (UID: "f8d47a18-0c9f-4126-8e93-5ba2544b1480"). InnerVolumeSpecName "kube-api-access-z7zrr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.790175 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f8d47a18-0c9f-4126-8e93-5ba2544b1480" (UID: "f8d47a18-0c9f-4126-8e93-5ba2544b1480"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.815900 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-config-data" (OuterVolumeSpecName: "config-data") pod "f8d47a18-0c9f-4126-8e93-5ba2544b1480" (UID: "f8d47a18-0c9f-4126-8e93-5ba2544b1480"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.863396 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.863449 5016 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.863464 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8d47a18-0c9f-4126-8e93-5ba2544b1480-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.863478 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8d47a18-0c9f-4126-8e93-5ba2544b1480-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.863494 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7zrr\" (UniqueName: \"kubernetes.io/projected/f8d47a18-0c9f-4126-8e93-5ba2544b1480-kube-api-access-z7zrr\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.900149 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7d75dc7f86-8kj4j" event={"ID":"f8d47a18-0c9f-4126-8e93-5ba2544b1480","Type":"ContainerDied","Data":"da4ecaa3843ba06ff00ea6d7e8a20a3106b88d149077d74feacf469753aff6ea"} Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.900532 5016 scope.go:117] "RemoveContainer" containerID="cfd1756a2ff29b932665723464a5aa5ae38cf3f0ba2265742f8854f7df2b68fa" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.900194 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7d75dc7f86-8kj4j" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.903693 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-dgkjf" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.904449 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dgkjf" event={"ID":"d31ff49c-2515-4b93-b3b8-e776e3190ab7","Type":"ContainerDied","Data":"0752d0f4d8f884f3d957eeeec24f5bd075c2eba2887b057a0cfadade2658cc36"} Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.904524 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0752d0f4d8f884f3d957eeeec24f5bd075c2eba2887b057a0cfadade2658cc36" Dec 11 10:57:10 crc kubenswrapper[5016]: E1211 10:57:10.910477 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.931728 5016 scope.go:117] "RemoveContainer" containerID="448d139089718b479bb8db1d152c382059860a37c2adedbff50cfd0dc385c2b4" Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.948294 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7d75dc7f86-8kj4j"] Dec 11 10:57:10 crc kubenswrapper[5016]: I1211 10:57:10.959881 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-7d75dc7f86-8kj4j"] Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.488386 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" path="/var/lib/kubelet/pods/f8d47a18-0c9f-4126-8e93-5ba2544b1480/volumes" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.510960 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 10:57:11 crc kubenswrapper[5016]: E1211 10:57:11.511678 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" containerName="barbican-api-log" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.511707 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" containerName="barbican-api-log" Dec 11 10:57:11 crc kubenswrapper[5016]: E1211 10:57:11.511726 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" containerName="barbican-api" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.511734 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" containerName="barbican-api" Dec 11 10:57:11 crc kubenswrapper[5016]: E1211 10:57:11.511745 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22451284-6148-4113-a7f5-7c7009092dbe" containerName="dnsmasq-dns" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.511752 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="22451284-6148-4113-a7f5-7c7009092dbe" containerName="dnsmasq-dns" Dec 11 10:57:11 crc kubenswrapper[5016]: E1211 10:57:11.511766 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d31ff49c-2515-4b93-b3b8-e776e3190ab7" containerName="cinder-db-sync" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.511772 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="d31ff49c-2515-4b93-b3b8-e776e3190ab7" containerName="cinder-db-sync" Dec 11 10:57:11 crc kubenswrapper[5016]: E1211 10:57:11.511805 5016 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="22451284-6148-4113-a7f5-7c7009092dbe" containerName="init" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.511812 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="22451284-6148-4113-a7f5-7c7009092dbe" containerName="init" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.512140 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="d31ff49c-2515-4b93-b3b8-e776e3190ab7" containerName="cinder-db-sync" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.512184 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="22451284-6148-4113-a7f5-7c7009092dbe" containerName="dnsmasq-dns" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.512203 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" containerName="barbican-api" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.512222 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8d47a18-0c9f-4126-8e93-5ba2544b1480" containerName="barbican-api-log" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.513830 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.519468 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.520950 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-6mwkk" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.521252 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.521400 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.521549 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.572581 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-9xt82"] Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.589515 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.596792 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-9xt82"] Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.684221 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-config\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.684272 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-config-data\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.684324 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.684350 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.684414 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.684454 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.684484 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sk7h4\" (UniqueName: \"kubernetes.io/projected/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-kube-api-access-sk7h4\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.684522 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.684682 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-xk949\" (UniqueName: \"kubernetes.io/projected/a1e43ec9-f406-4d7b-8928-3e3ae504973d-kube-api-access-xk949\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.684761 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-dns-svc\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.684852 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-scripts\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.684913 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.786722 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xk949\" (UniqueName: \"kubernetes.io/projected/a1e43ec9-f406-4d7b-8928-3e3ae504973d-kube-api-access-xk949\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.786916 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-dns-svc\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.787006 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-scripts\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.787056 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.787145 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-config\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.787179 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-config-data\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.787234 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.787256 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.787286 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.787319 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.787355 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sk7h4\" (UniqueName: \"kubernetes.io/projected/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-kube-api-access-sk7h4\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.787388 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.787705 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.788696 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-dns-svc\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.788769 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " 
pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.789333 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.792029 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-config\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.796566 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-scripts\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.796621 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.802646 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.810065 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.812210 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.812215 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sk7h4\" (UniqueName: \"kubernetes.io/projected/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-kube-api-access-sk7h4\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.814508 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.817868 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.820280 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-config-data\") pod \"cinder-scheduler-0\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.828405 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.828528 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xk949\" (UniqueName: \"kubernetes.io/projected/a1e43ec9-f406-4d7b-8928-3e3ae504973d-kube-api-access-xk949\") pod \"dnsmasq-dns-6578955fd5-9xt82\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.839656 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.916807 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.921654 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49507f32-2b67-4dc4-a968-a691ca6c8454","Type":"ContainerStarted","Data":"76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439"} Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.921882 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerName="proxy-httpd" containerID="cri-o://76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439" gracePeriod=30 Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.921893 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.922072 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerName="ceilometer-notification-agent" containerID="cri-o://e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3" gracePeriod=30 Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.922177 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerName="sg-core" containerID="cri-o://da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c" gracePeriod=30 Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.997381 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c307ef5e-0f02-4aa1-af42-795a6ae19393-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.997832 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-scripts\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.997864 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-config-data\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.997911 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4t96\" (UniqueName: \"kubernetes.io/projected/c307ef5e-0f02-4aa1-af42-795a6ae19393-kube-api-access-l4t96\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.997971 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-config-data-custom\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.998008 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:11 crc kubenswrapper[5016]: I1211 10:57:11.998093 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c307ef5e-0f02-4aa1-af42-795a6ae19393-logs\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.101192 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c307ef5e-0f02-4aa1-af42-795a6ae19393-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.100896 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c307ef5e-0f02-4aa1-af42-795a6ae19393-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.104165 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-scripts\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.104194 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-config-data\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.104253 5016 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-l4t96\" (UniqueName: \"kubernetes.io/projected/c307ef5e-0f02-4aa1-af42-795a6ae19393-kube-api-access-l4t96\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.104309 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-config-data-custom\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.104352 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.104508 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c307ef5e-0f02-4aa1-af42-795a6ae19393-logs\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.105331 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c307ef5e-0f02-4aa1-af42-795a6ae19393-logs\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.114358 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-scripts\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.115229 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.116044 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-config-data-custom\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.119800 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-config-data\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.141490 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4t96\" (UniqueName: \"kubernetes.io/projected/c307ef5e-0f02-4aa1-af42-795a6ae19393-kube-api-access-l4t96\") pod \"cinder-api-0\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.218364 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.416191 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 10:57:12 crc kubenswrapper[5016]: W1211 10:57:12.421985 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod97a53b6e_c2fd_4f0b_97d6_cc8a33fdcc9b.slice/crio-70d071cd880ac2b641d8b499d064e4192e3377abe767a2d70dc3ceabb0c87458 WatchSource:0}: Error finding container 70d071cd880ac2b641d8b499d064e4192e3377abe767a2d70dc3ceabb0c87458: Status 404 returned error can't find the container with id 70d071cd880ac2b641d8b499d064e4192e3377abe767a2d70dc3ceabb0c87458 Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.485245 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-9xt82"] Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.530505 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.801319 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.961301 5016 generic.go:334] "Generic (PLEG): container finished" podID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerID="76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439" exitCode=0 Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.961351 5016 generic.go:334] "Generic (PLEG): container finished" podID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerID="da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c" exitCode=2 Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.961403 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49507f32-2b67-4dc4-a968-a691ca6c8454","Type":"ContainerDied","Data":"76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439"} Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.961447 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49507f32-2b67-4dc4-a968-a691ca6c8454","Type":"ContainerDied","Data":"da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c"} Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.962780 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c307ef5e-0f02-4aa1-af42-795a6ae19393","Type":"ContainerStarted","Data":"2e2116f178bf48e96f1bc110094df6ba17674ed704bff5c84ece6aeb005fd021"} Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.964757 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b","Type":"ContainerStarted","Data":"70d071cd880ac2b641d8b499d064e4192e3377abe767a2d70dc3ceabb0c87458"} Dec 11 10:57:12 crc kubenswrapper[5016]: I1211 10:57:12.967753 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" event={"ID":"a1e43ec9-f406-4d7b-8928-3e3ae504973d","Type":"ContainerStarted","Data":"cb0ab21bfb56a0ea1604dbe07204d76232d23b46a803378a3ceeb26d67cc005d"} Dec 11 10:57:13 crc kubenswrapper[5016]: I1211 10:57:13.813616 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 11 10:57:14 crc kubenswrapper[5016]: I1211 10:57:14.008583 5016 generic.go:334] "Generic (PLEG): container finished" 
podID="a1e43ec9-f406-4d7b-8928-3e3ae504973d" containerID="5d80ddf340a63b1965af906c66d29de8220f43794b62ae462a8059ef41368671" exitCode=0 Dec 11 10:57:14 crc kubenswrapper[5016]: I1211 10:57:14.008703 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" event={"ID":"a1e43ec9-f406-4d7b-8928-3e3ae504973d","Type":"ContainerDied","Data":"5d80ddf340a63b1965af906c66d29de8220f43794b62ae462a8059ef41368671"} Dec 11 10:57:14 crc kubenswrapper[5016]: I1211 10:57:14.012512 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c307ef5e-0f02-4aa1-af42-795a6ae19393","Type":"ContainerStarted","Data":"637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a"} Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.039750 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c307ef5e-0f02-4aa1-af42-795a6ae19393","Type":"ContainerStarted","Data":"b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0"} Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.040225 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c307ef5e-0f02-4aa1-af42-795a6ae19393" containerName="cinder-api-log" containerID="cri-o://637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a" gracePeriod=30 Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.040373 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c307ef5e-0f02-4aa1-af42-795a6ae19393" containerName="cinder-api" containerID="cri-o://b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0" gracePeriod=30 Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.040615 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.043258 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b","Type":"ContainerStarted","Data":"66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5"} Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.051473 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" event={"ID":"a1e43ec9-f406-4d7b-8928-3e3ae504973d","Type":"ContainerStarted","Data":"629aa68ce39880cf13d5df3823e1ed3793c73ed76ea74abecd6b3c44e97522d4"} Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.052985 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.070698 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.070676609 podStartE2EDuration="4.070676609s" podCreationTimestamp="2025-12-11 10:57:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:57:15.063662627 +0000 UTC m=+1351.882222226" watchObservedRunningTime="2025-12-11 10:57:15.070676609 +0000 UTC m=+1351.889236198" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.093874 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" podStartSLOduration=4.093850355 podStartE2EDuration="4.093850355s" podCreationTimestamp="2025-12-11 10:57:11 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:57:15.08462282 +0000 UTC m=+1351.903182409" watchObservedRunningTime="2025-12-11 10:57:15.093850355 +0000 UTC m=+1351.912409934" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.734299 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.741391 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.896682 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-scripts\") pod \"c307ef5e-0f02-4aa1-af42-795a6ae19393\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.896767 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-combined-ca-bundle\") pod \"c307ef5e-0f02-4aa1-af42-795a6ae19393\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.896802 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-combined-ca-bundle\") pod \"49507f32-2b67-4dc4-a968-a691ca6c8454\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.896836 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c307ef5e-0f02-4aa1-af42-795a6ae19393-logs\") pod \"c307ef5e-0f02-4aa1-af42-795a6ae19393\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.896862 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-scripts\") pod \"49507f32-2b67-4dc4-a968-a691ca6c8454\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.896896 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c307ef5e-0f02-4aa1-af42-795a6ae19393-etc-machine-id\") pod \"c307ef5e-0f02-4aa1-af42-795a6ae19393\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.896927 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-sg-core-conf-yaml\") pod \"49507f32-2b67-4dc4-a968-a691ca6c8454\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.897107 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4t96\" (UniqueName: \"kubernetes.io/projected/c307ef5e-0f02-4aa1-af42-795a6ae19393-kube-api-access-l4t96\") pod \"c307ef5e-0f02-4aa1-af42-795a6ae19393\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.897163 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-config-data\") pod \"c307ef5e-0f02-4aa1-af42-795a6ae19393\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.897185 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-config-data-custom\") pod \"c307ef5e-0f02-4aa1-af42-795a6ae19393\" (UID: \"c307ef5e-0f02-4aa1-af42-795a6ae19393\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.897230 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-config-data\") pod \"49507f32-2b67-4dc4-a968-a691ca6c8454\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.897302 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsn2p\" (UniqueName: \"kubernetes.io/projected/49507f32-2b67-4dc4-a968-a691ca6c8454-kube-api-access-tsn2p\") pod \"49507f32-2b67-4dc4-a968-a691ca6c8454\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.897326 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49507f32-2b67-4dc4-a968-a691ca6c8454-log-httpd\") pod \"49507f32-2b67-4dc4-a968-a691ca6c8454\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.897371 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49507f32-2b67-4dc4-a968-a691ca6c8454-run-httpd\") pod \"49507f32-2b67-4dc4-a968-a691ca6c8454\" (UID: \"49507f32-2b67-4dc4-a968-a691ca6c8454\") " Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.897366 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c307ef5e-0f02-4aa1-af42-795a6ae19393-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c307ef5e-0f02-4aa1-af42-795a6ae19393" (UID: "c307ef5e-0f02-4aa1-af42-795a6ae19393"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.898350 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c307ef5e-0f02-4aa1-af42-795a6ae19393-logs" (OuterVolumeSpecName: "logs") pod "c307ef5e-0f02-4aa1-af42-795a6ae19393" (UID: "c307ef5e-0f02-4aa1-af42-795a6ae19393"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.898914 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49507f32-2b67-4dc4-a968-a691ca6c8454-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "49507f32-2b67-4dc4-a968-a691ca6c8454" (UID: "49507f32-2b67-4dc4-a968-a691ca6c8454"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.898967 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49507f32-2b67-4dc4-a968-a691ca6c8454-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "49507f32-2b67-4dc4-a968-a691ca6c8454" (UID: "49507f32-2b67-4dc4-a968-a691ca6c8454"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.905806 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c307ef5e-0f02-4aa1-af42-795a6ae19393-kube-api-access-l4t96" (OuterVolumeSpecName: "kube-api-access-l4t96") pod "c307ef5e-0f02-4aa1-af42-795a6ae19393" (UID: "c307ef5e-0f02-4aa1-af42-795a6ae19393"). InnerVolumeSpecName "kube-api-access-l4t96". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.906846 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-scripts" (OuterVolumeSpecName: "scripts") pod "49507f32-2b67-4dc4-a968-a691ca6c8454" (UID: "49507f32-2b67-4dc4-a968-a691ca6c8454"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.907069 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49507f32-2b67-4dc4-a968-a691ca6c8454-kube-api-access-tsn2p" (OuterVolumeSpecName: "kube-api-access-tsn2p") pod "49507f32-2b67-4dc4-a968-a691ca6c8454" (UID: "49507f32-2b67-4dc4-a968-a691ca6c8454"). InnerVolumeSpecName "kube-api-access-tsn2p". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.907197 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-scripts" (OuterVolumeSpecName: "scripts") pod "c307ef5e-0f02-4aa1-af42-795a6ae19393" (UID: "c307ef5e-0f02-4aa1-af42-795a6ae19393"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.908677 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c307ef5e-0f02-4aa1-af42-795a6ae19393" (UID: "c307ef5e-0f02-4aa1-af42-795a6ae19393"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.935981 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c307ef5e-0f02-4aa1-af42-795a6ae19393" (UID: "c307ef5e-0f02-4aa1-af42-795a6ae19393"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.938446 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "49507f32-2b67-4dc4-a968-a691ca6c8454" (UID: "49507f32-2b67-4dc4-a968-a691ca6c8454"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.973838 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49507f32-2b67-4dc4-a968-a691ca6c8454" (UID: "49507f32-2b67-4dc4-a968-a691ca6c8454"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.991117 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-config-data" (OuterVolumeSpecName: "config-data") pod "c307ef5e-0f02-4aa1-af42-795a6ae19393" (UID: "c307ef5e-0f02-4aa1-af42-795a6ae19393"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:15 crc kubenswrapper[5016]: I1211 10:57:15.999872 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:15.999916 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.000166 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.000175 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c307ef5e-0f02-4aa1-af42-795a6ae19393-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.000183 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.000191 5016 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c307ef5e-0f02-4aa1-af42-795a6ae19393-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.000199 5016 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.000207 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4t96\" (UniqueName: \"kubernetes.io/projected/c307ef5e-0f02-4aa1-af42-795a6ae19393-kube-api-access-l4t96\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.000216 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.000224 5016 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c307ef5e-0f02-4aa1-af42-795a6ae19393-config-data-custom\") on node 
\"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.000232 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsn2p\" (UniqueName: \"kubernetes.io/projected/49507f32-2b67-4dc4-a968-a691ca6c8454-kube-api-access-tsn2p\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.000240 5016 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49507f32-2b67-4dc4-a968-a691ca6c8454-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.000249 5016 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49507f32-2b67-4dc4-a968-a691ca6c8454-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.009217 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5d4bc555dc-hjmj8" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.045668 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-config-data" (OuterVolumeSpecName: "config-data") pod "49507f32-2b67-4dc4-a968-a691ca6c8454" (UID: "49507f32-2b67-4dc4-a968-a691ca6c8454"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.070882 5016 generic.go:334] "Generic (PLEG): container finished" podID="c307ef5e-0f02-4aa1-af42-795a6ae19393" containerID="b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0" exitCode=0 Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.070913 5016 generic.go:334] "Generic (PLEG): container finished" podID="c307ef5e-0f02-4aa1-af42-795a6ae19393" containerID="637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a" exitCode=143 Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.070978 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c307ef5e-0f02-4aa1-af42-795a6ae19393","Type":"ContainerDied","Data":"b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0"} Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.071001 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c307ef5e-0f02-4aa1-af42-795a6ae19393","Type":"ContainerDied","Data":"637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a"} Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.071012 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c307ef5e-0f02-4aa1-af42-795a6ae19393","Type":"ContainerDied","Data":"2e2116f178bf48e96f1bc110094df6ba17674ed704bff5c84ece6aeb005fd021"} Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.071029 5016 scope.go:117] "RemoveContainer" containerID="b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.071151 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.087060 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b","Type":"ContainerStarted","Data":"6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a"} Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.125441 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49507f32-2b67-4dc4-a968-a691ca6c8454-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.138011 5016 generic.go:334] "Generic (PLEG): container finished" podID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerID="e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3" exitCode=0 Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.139091 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49507f32-2b67-4dc4-a968-a691ca6c8454","Type":"ContainerDied","Data":"e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3"} Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.139176 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.139177 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49507f32-2b67-4dc4-a968-a691ca6c8454","Type":"ContainerDied","Data":"619d93f807fb092aef2d07dcb8fd2927296312918b4e543b60616008235be231"} Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.149773 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-76bc74566d-xxk9f"] Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.150349 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-76bc74566d-xxk9f" podUID="4b4a7e5c-045f-434d-8744-60b045803cc3" containerName="neutron-api" containerID="cri-o://e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205" gracePeriod=30 Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.150527 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-76bc74566d-xxk9f" podUID="4b4a7e5c-045f-434d-8744-60b045803cc3" containerName="neutron-httpd" containerID="cri-o://55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7" gracePeriod=30 Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.176812 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.686502414 podStartE2EDuration="5.17679457s" podCreationTimestamp="2025-12-11 10:57:11 +0000 UTC" firstStartedPulling="2025-12-11 10:57:12.425833587 +0000 UTC m=+1349.244393166" lastFinishedPulling="2025-12-11 10:57:13.916125743 +0000 UTC m=+1350.734685322" observedRunningTime="2025-12-11 10:57:16.166244762 +0000 UTC m=+1352.984804351" watchObservedRunningTime="2025-12-11 10:57:16.17679457 +0000 UTC m=+1352.995354149" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.209671 5016 scope.go:117] "RemoveContainer" containerID="637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.219714 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.230244 5016 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openstack/cinder-api-0"] Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.255017 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 11 10:57:16 crc kubenswrapper[5016]: E1211 10:57:16.255481 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerName="ceilometer-notification-agent" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.255495 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerName="ceilometer-notification-agent" Dec 11 10:57:16 crc kubenswrapper[5016]: E1211 10:57:16.255508 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerName="sg-core" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.255514 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerName="sg-core" Dec 11 10:57:16 crc kubenswrapper[5016]: E1211 10:57:16.255532 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerName="proxy-httpd" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.255538 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerName="proxy-httpd" Dec 11 10:57:16 crc kubenswrapper[5016]: E1211 10:57:16.255548 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c307ef5e-0f02-4aa1-af42-795a6ae19393" containerName="cinder-api-log" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.255554 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="c307ef5e-0f02-4aa1-af42-795a6ae19393" containerName="cinder-api-log" Dec 11 10:57:16 crc kubenswrapper[5016]: E1211 10:57:16.255567 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c307ef5e-0f02-4aa1-af42-795a6ae19393" containerName="cinder-api" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.255573 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="c307ef5e-0f02-4aa1-af42-795a6ae19393" containerName="cinder-api" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.255763 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerName="ceilometer-notification-agent" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.255781 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="c307ef5e-0f02-4aa1-af42-795a6ae19393" containerName="cinder-api" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.255793 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerName="proxy-httpd" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.255804 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" containerName="sg-core" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.255821 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="c307ef5e-0f02-4aa1-af42-795a6ae19393" containerName="cinder-api-log" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.256877 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.263261 5016 scope.go:117] "RemoveContainer" containerID="b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0" Dec 11 10:57:16 crc kubenswrapper[5016]: E1211 10:57:16.272041 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0\": container with ID starting with b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0 not found: ID does not exist" containerID="b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.272115 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0"} err="failed to get container status \"b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0\": rpc error: code = NotFound desc = could not find container \"b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0\": container with ID starting with b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0 not found: ID does not exist" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.272167 5016 scope.go:117] "RemoveContainer" containerID="637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.272269 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.272810 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 11 10:57:16 crc kubenswrapper[5016]: E1211 10:57:16.272792 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a\": container with ID starting with 637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a not found: ID does not exist" containerID="637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.272866 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a"} err="failed to get container status \"637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a\": rpc error: code = NotFound desc = could not find container \"637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a\": container with ID starting with 637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a not found: ID does not exist" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.272898 5016 scope.go:117] "RemoveContainer" containerID="b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.273122 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.274273 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0"} err="failed to get container status \"b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0\": rpc 
error: code = NotFound desc = could not find container \"b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0\": container with ID starting with b30cd7fb3c51db37564b3c67ddb267cd464b47b0fe3a2cf672d836276deb3ca0 not found: ID does not exist" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.274308 5016 scope.go:117] "RemoveContainer" containerID="637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.274638 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a"} err="failed to get container status \"637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a\": rpc error: code = NotFound desc = could not find container \"637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a\": container with ID starting with 637bde6eb7de313ed1ebd851c7d07cdc84db194aae2f879bc87db5a2fcefde5a not found: ID does not exist" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.274670 5016 scope.go:117] "RemoveContainer" containerID="76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439" Dec 11 10:57:16 crc kubenswrapper[5016]: E1211 10:57:16.329153 5016 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc307ef5e_0f02_4aa1_af42_795a6ae19393.slice/crio-2e2116f178bf48e96f1bc110094df6ba17674ed704bff5c84ece6aeb005fd021\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49507f32_2b67_4dc4_a968_a691ca6c8454.slice/crio-619d93f807fb092aef2d07dcb8fd2927296312918b4e543b60616008235be231\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49507f32_2b67_4dc4_a968_a691ca6c8454.slice\": RecentStats: unable to find data in memory cache]" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.330449 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.339316 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.352221 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.355702 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.358106 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.360033 5016 scope.go:117] "RemoveContainer" containerID="da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.360375 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.360603 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.371715 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.407602 5016 scope.go:117] "RemoveContainer" containerID="e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.432107 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40789e09-e7ca-4ce3-8939-9ab2605e257f-logs\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.432170 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40789e09-e7ca-4ce3-8939-9ab2605e257f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.432248 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-scripts\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.432281 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-config-data\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.432389 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.432471 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fnvp\" (UniqueName: \"kubernetes.io/projected/40789e09-e7ca-4ce3-8939-9ab2605e257f-kube-api-access-8fnvp\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.432527 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-public-tls-certs\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 
10:57:16.432640 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.432681 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-config-data-custom\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.437003 5016 scope.go:117] "RemoveContainer" containerID="76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439" Dec 11 10:57:16 crc kubenswrapper[5016]: E1211 10:57:16.437729 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439\": container with ID starting with 76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439 not found: ID does not exist" containerID="76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.437770 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439"} err="failed to get container status \"76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439\": rpc error: code = NotFound desc = could not find container \"76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439\": container with ID starting with 76796b96be32fb132e1da335508b8da0ee45097e22526ba05ce48a087e964439 not found: ID does not exist" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.437796 5016 scope.go:117] "RemoveContainer" containerID="da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c" Dec 11 10:57:16 crc kubenswrapper[5016]: E1211 10:57:16.438196 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c\": container with ID starting with da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c not found: ID does not exist" containerID="da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.438223 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c"} err="failed to get container status \"da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c\": rpc error: code = NotFound desc = could not find container \"da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c\": container with ID starting with da08e9a47a85eac1a4d5ad31264859c9a8de2269802d5323409ac4828d7b837c not found: ID does not exist" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.438238 5016 scope.go:117] "RemoveContainer" containerID="e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3" Dec 11 10:57:16 crc kubenswrapper[5016]: E1211 10:57:16.438547 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find 
container \"e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3\": container with ID starting with e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3 not found: ID does not exist" containerID="e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.438576 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3"} err="failed to get container status \"e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3\": rpc error: code = NotFound desc = could not find container \"e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3\": container with ID starting with e04f7ad9b767a0d0a1bf8cc8b50e716f613cccda695c6c6374c21b7e3c9ed1f3 not found: ID does not exist" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535158 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40789e09-e7ca-4ce3-8939-9ab2605e257f-logs\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535234 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40789e09-e7ca-4ce3-8939-9ab2605e257f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535268 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/53bba674-9897-4df3-8898-bfba40d352f2-run-httpd\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535301 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-config-data\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535320 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535343 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-scripts\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535389 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/53bba674-9897-4df3-8898-bfba40d352f2-log-httpd\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535410 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-scripts\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535451 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-config-data\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535485 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535522 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fnvp\" (UniqueName: \"kubernetes.io/projected/40789e09-e7ca-4ce3-8939-9ab2605e257f-kube-api-access-8fnvp\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535555 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-public-tls-certs\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535585 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535645 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535673 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-config-data-custom\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.535706 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6ksj\" (UniqueName: \"kubernetes.io/projected/53bba674-9897-4df3-8898-bfba40d352f2-kube-api-access-g6ksj\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.536248 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40789e09-e7ca-4ce3-8939-9ab2605e257f-logs\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.536302 5016 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40789e09-e7ca-4ce3-8939-9ab2605e257f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.543920 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-public-tls-certs\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.544469 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.545038 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-config-data-custom\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.547336 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-config-data\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.548561 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.548851 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40789e09-e7ca-4ce3-8939-9ab2605e257f-scripts\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.559475 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fnvp\" (UniqueName: \"kubernetes.io/projected/40789e09-e7ca-4ce3-8939-9ab2605e257f-kube-api-access-8fnvp\") pod \"cinder-api-0\" (UID: \"40789e09-e7ca-4ce3-8939-9ab2605e257f\") " pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.631816 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.648308 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.648389 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6ksj\" (UniqueName: \"kubernetes.io/projected/53bba674-9897-4df3-8898-bfba40d352f2-kube-api-access-g6ksj\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.648495 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/53bba674-9897-4df3-8898-bfba40d352f2-run-httpd\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.648520 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-config-data\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.648544 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.648575 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-scripts\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.648616 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/53bba674-9897-4df3-8898-bfba40d352f2-log-httpd\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.649771 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/53bba674-9897-4df3-8898-bfba40d352f2-run-httpd\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.649853 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/53bba674-9897-4df3-8898-bfba40d352f2-log-httpd\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.653378 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " 
pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.655567 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-config-data\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.658448 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-scripts\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.660703 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.667550 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6ksj\" (UniqueName: \"kubernetes.io/projected/53bba674-9897-4df3-8898-bfba40d352f2-kube-api-access-g6ksj\") pod \"ceilometer-0\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") " pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.699150 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:57:16 crc kubenswrapper[5016]: I1211 10:57:16.839823 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 11 10:57:17 crc kubenswrapper[5016]: I1211 10:57:17.124129 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 11 10:57:17 crc kubenswrapper[5016]: W1211 10:57:17.130142 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40789e09_e7ca_4ce3_8939_9ab2605e257f.slice/crio-11005fa3233882bd6215aa770355e2af8501a24941d80ea393ae21e70a7d68cb WatchSource:0}: Error finding container 11005fa3233882bd6215aa770355e2af8501a24941d80ea393ae21e70a7d68cb: Status 404 returned error can't find the container with id 11005fa3233882bd6215aa770355e2af8501a24941d80ea393ae21e70a7d68cb Dec 11 10:57:17 crc kubenswrapper[5016]: I1211 10:57:17.139422 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:57:17 crc kubenswrapper[5016]: I1211 10:57:17.164077 5016 generic.go:334] "Generic (PLEG): container finished" podID="4b4a7e5c-045f-434d-8744-60b045803cc3" containerID="55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7" exitCode=0 Dec 11 10:57:17 crc kubenswrapper[5016]: I1211 10:57:17.164195 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-76bc74566d-xxk9f" event={"ID":"4b4a7e5c-045f-434d-8744-60b045803cc3","Type":"ContainerDied","Data":"55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7"} Dec 11 10:57:17 crc kubenswrapper[5016]: I1211 10:57:17.170090 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"40789e09-e7ca-4ce3-8939-9ab2605e257f","Type":"ContainerStarted","Data":"11005fa3233882bd6215aa770355e2af8501a24941d80ea393ae21e70a7d68cb"} Dec 11 10:57:17 crc kubenswrapper[5016]: I1211 10:57:17.488261 5016 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="49507f32-2b67-4dc4-a968-a691ca6c8454" path="/var/lib/kubelet/pods/49507f32-2b67-4dc4-a968-a691ca6c8454/volumes" Dec 11 10:57:17 crc kubenswrapper[5016]: I1211 10:57:17.489263 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c307ef5e-0f02-4aa1-af42-795a6ae19393" path="/var/lib/kubelet/pods/c307ef5e-0f02-4aa1-af42-795a6ae19393/volumes" Dec 11 10:57:18 crc kubenswrapper[5016]: I1211 10:57:18.185928 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"40789e09-e7ca-4ce3-8939-9ab2605e257f","Type":"ContainerStarted","Data":"bdc625e2d2345ff5e1cf4555efedba40e6555d3cf0468a641cd2a6ed7191a7b1"} Dec 11 10:57:18 crc kubenswrapper[5016]: I1211 10:57:18.190394 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"53bba674-9897-4df3-8898-bfba40d352f2","Type":"ContainerStarted","Data":"d7d97a58f39c8cc6305c4b3e468721854224785535decd54547ea267ef281951"} Dec 11 10:57:18 crc kubenswrapper[5016]: I1211 10:57:18.190518 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"53bba674-9897-4df3-8898-bfba40d352f2","Type":"ContainerStarted","Data":"8250ad5bff45a2896a193d47ff73cba46914d7034e4cf1ad7d51e2597ddb44b2"} Dec 11 10:57:19 crc kubenswrapper[5016]: I1211 10:57:19.222065 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"53bba674-9897-4df3-8898-bfba40d352f2","Type":"ContainerStarted","Data":"9d1b4a05f1389775128fcbbceb131df2c8eac3e95800a2adc09bd6c8934d0305"} Dec 11 10:57:19 crc kubenswrapper[5016]: I1211 10:57:19.225751 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"40789e09-e7ca-4ce3-8939-9ab2605e257f","Type":"ContainerStarted","Data":"06e9b592d02555a8cdb81e21db979e22bf371347839dbdc09262663cfebda829"} Dec 11 10:57:19 crc kubenswrapper[5016]: I1211 10:57:19.226002 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 11 10:57:19 crc kubenswrapper[5016]: I1211 10:57:19.256497 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.256477526 podStartE2EDuration="3.256477526s" podCreationTimestamp="2025-12-11 10:57:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:57:19.255515733 +0000 UTC m=+1356.074075322" watchObservedRunningTime="2025-12-11 10:57:19.256477526 +0000 UTC m=+1356.075037095" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.113855 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.229732 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-config\") pod \"4b4a7e5c-045f-434d-8744-60b045803cc3\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.230118 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-ovndb-tls-certs\") pod \"4b4a7e5c-045f-434d-8744-60b045803cc3\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.230329 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-httpd-config\") pod \"4b4a7e5c-045f-434d-8744-60b045803cc3\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.230411 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-combined-ca-bundle\") pod \"4b4a7e5c-045f-434d-8744-60b045803cc3\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.230461 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlh2z\" (UniqueName: \"kubernetes.io/projected/4b4a7e5c-045f-434d-8744-60b045803cc3-kube-api-access-jlh2z\") pod \"4b4a7e5c-045f-434d-8744-60b045803cc3\" (UID: \"4b4a7e5c-045f-434d-8744-60b045803cc3\") " Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.236868 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "4b4a7e5c-045f-434d-8744-60b045803cc3" (UID: "4b4a7e5c-045f-434d-8744-60b045803cc3"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.238082 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b4a7e5c-045f-434d-8744-60b045803cc3-kube-api-access-jlh2z" (OuterVolumeSpecName: "kube-api-access-jlh2z") pod "4b4a7e5c-045f-434d-8744-60b045803cc3" (UID: "4b4a7e5c-045f-434d-8744-60b045803cc3"). InnerVolumeSpecName "kube-api-access-jlh2z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.245337 5016 generic.go:334] "Generic (PLEG): container finished" podID="4b4a7e5c-045f-434d-8744-60b045803cc3" containerID="e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205" exitCode=0 Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.245412 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-76bc74566d-xxk9f" event={"ID":"4b4a7e5c-045f-434d-8744-60b045803cc3","Type":"ContainerDied","Data":"e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205"} Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.245443 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-76bc74566d-xxk9f" event={"ID":"4b4a7e5c-045f-434d-8744-60b045803cc3","Type":"ContainerDied","Data":"8649f8e1e19d247897aa0461860423d1df0332ca108cbffa5205e02012a6eb6e"} Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.245503 5016 scope.go:117] "RemoveContainer" containerID="55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.245634 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-76bc74566d-xxk9f" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.260504 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"53bba674-9897-4df3-8898-bfba40d352f2","Type":"ContainerStarted","Data":"9409c8b825ecaa3956452afd3023535eaadcb4765df8db9420dd3c8fcf783c14"} Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.278298 5016 scope.go:117] "RemoveContainer" containerID="e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.306249 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b4a7e5c-045f-434d-8744-60b045803cc3" (UID: "4b4a7e5c-045f-434d-8744-60b045803cc3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.312951 5016 scope.go:117] "RemoveContainer" containerID="55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.313143 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-config" (OuterVolumeSpecName: "config") pod "4b4a7e5c-045f-434d-8744-60b045803cc3" (UID: "4b4a7e5c-045f-434d-8744-60b045803cc3"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:20 crc kubenswrapper[5016]: E1211 10:57:20.313577 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7\": container with ID starting with 55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7 not found: ID does not exist" containerID="55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.313614 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7"} err="failed to get container status \"55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7\": rpc error: code = NotFound desc = could not find container \"55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7\": container with ID starting with 55c6b1ff0429980f202d793d98851a48020b64b84a5d31f95dec38e3ce85e4c7 not found: ID does not exist" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.313643 5016 scope.go:117] "RemoveContainer" containerID="e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205" Dec 11 10:57:20 crc kubenswrapper[5016]: E1211 10:57:20.314112 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205\": container with ID starting with e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205 not found: ID does not exist" containerID="e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.314209 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205"} err="failed to get container status \"e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205\": rpc error: code = NotFound desc = could not find container \"e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205\": container with ID starting with e21ae27cb31f4b0e34e5a46389542a0d5532b40050032e2830469d6ab168b205 not found: ID does not exist" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.330469 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "4b4a7e5c-045f-434d-8744-60b045803cc3" (UID: "4b4a7e5c-045f-434d-8744-60b045803cc3"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.332428 5016 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-httpd-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.332455 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.332470 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlh2z\" (UniqueName: \"kubernetes.io/projected/4b4a7e5c-045f-434d-8744-60b045803cc3-kube-api-access-jlh2z\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.332479 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.332488 5016 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4a7e5c-045f-434d-8744-60b045803cc3-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.544379 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.595398 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-76bc74566d-xxk9f"] Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.603907 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-76bc74566d-xxk9f"] Dec 11 10:57:20 crc kubenswrapper[5016]: I1211 10:57:20.662540 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:57:21 crc kubenswrapper[5016]: I1211 10:57:21.487566 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b4a7e5c-045f-434d-8744-60b045803cc3" path="/var/lib/kubelet/pods/4b4a7e5c-045f-434d-8744-60b045803cc3/volumes" Dec 11 10:57:21 crc kubenswrapper[5016]: I1211 10:57:21.919142 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.001914 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-pd9p5"] Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.002179 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" podUID="224a0072-ebea-4ed2-9b57-af3df41200c5" containerName="dnsmasq-dns" containerID="cri-o://57b7ad75283d8bb53aaf969f78ec56f27beec9e99e52aaf26377aac252875b93" gracePeriod=10 Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.232549 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.299915 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.336568 5016 generic.go:334] "Generic (PLEG): container finished" podID="224a0072-ebea-4ed2-9b57-af3df41200c5" 
containerID="57b7ad75283d8bb53aaf969f78ec56f27beec9e99e52aaf26377aac252875b93" exitCode=0 Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.336642 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" event={"ID":"224a0072-ebea-4ed2-9b57-af3df41200c5","Type":"ContainerDied","Data":"57b7ad75283d8bb53aaf969f78ec56f27beec9e99e52aaf26377aac252875b93"} Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.346233 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"53bba674-9897-4df3-8898-bfba40d352f2","Type":"ContainerStarted","Data":"82e929c0de411a73ee6f81bf7c3079d6bca91bd80d03316c5158e602327bfa6e"} Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.346342 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" containerName="cinder-scheduler" containerID="cri-o://66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5" gracePeriod=30 Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.346465 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" containerName="probe" containerID="cri-o://6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a" gracePeriod=30 Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.544328 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.550464 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.570023 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.484263531 podStartE2EDuration="6.570004642s" podCreationTimestamp="2025-12-11 10:57:16 +0000 UTC" firstStartedPulling="2025-12-11 10:57:17.154603683 +0000 UTC m=+1353.973163262" lastFinishedPulling="2025-12-11 10:57:21.240344794 +0000 UTC m=+1358.058904373" observedRunningTime="2025-12-11 10:57:22.39862105 +0000 UTC m=+1359.217180639" watchObservedRunningTime="2025-12-11 10:57:22.570004642 +0000 UTC m=+1359.388564211" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.707530 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-dns-svc\") pod \"224a0072-ebea-4ed2-9b57-af3df41200c5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.707643 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-ovsdbserver-nb\") pod \"224a0072-ebea-4ed2-9b57-af3df41200c5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.707780 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7k4d\" (UniqueName: \"kubernetes.io/projected/224a0072-ebea-4ed2-9b57-af3df41200c5-kube-api-access-k7k4d\") pod \"224a0072-ebea-4ed2-9b57-af3df41200c5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.707836 5016 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-config\") pod \"224a0072-ebea-4ed2-9b57-af3df41200c5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.707903 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-ovsdbserver-sb\") pod \"224a0072-ebea-4ed2-9b57-af3df41200c5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.708497 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-dns-swift-storage-0\") pod \"224a0072-ebea-4ed2-9b57-af3df41200c5\" (UID: \"224a0072-ebea-4ed2-9b57-af3df41200c5\") " Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.733106 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/224a0072-ebea-4ed2-9b57-af3df41200c5-kube-api-access-k7k4d" (OuterVolumeSpecName: "kube-api-access-k7k4d") pod "224a0072-ebea-4ed2-9b57-af3df41200c5" (UID: "224a0072-ebea-4ed2-9b57-af3df41200c5"). InnerVolumeSpecName "kube-api-access-k7k4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.761706 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "224a0072-ebea-4ed2-9b57-af3df41200c5" (UID: "224a0072-ebea-4ed2-9b57-af3df41200c5"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.771172 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "224a0072-ebea-4ed2-9b57-af3df41200c5" (UID: "224a0072-ebea-4ed2-9b57-af3df41200c5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.772350 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "224a0072-ebea-4ed2-9b57-af3df41200c5" (UID: "224a0072-ebea-4ed2-9b57-af3df41200c5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.786674 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-config" (OuterVolumeSpecName: "config") pod "224a0072-ebea-4ed2-9b57-af3df41200c5" (UID: "224a0072-ebea-4ed2-9b57-af3df41200c5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.794923 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "224a0072-ebea-4ed2-9b57-af3df41200c5" (UID: "224a0072-ebea-4ed2-9b57-af3df41200c5"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.813320 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.813362 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7k4d\" (UniqueName: \"kubernetes.io/projected/224a0072-ebea-4ed2-9b57-af3df41200c5-kube-api-access-k7k4d\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.813378 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.813390 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.813404 5016 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.813415 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/224a0072-ebea-4ed2-9b57-af3df41200c5-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:22 crc kubenswrapper[5016]: I1211 10:57:22.956312 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7df5fc4844-wdnrz" Dec 11 10:57:23 crc kubenswrapper[5016]: I1211 10:57:23.034444 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-78bccb96bd-btt5f"] Dec 11 10:57:23 crc kubenswrapper[5016]: I1211 10:57:23.358495 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" Dec 11 10:57:23 crc kubenswrapper[5016]: I1211 10:57:23.358501 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-pd9p5" event={"ID":"224a0072-ebea-4ed2-9b57-af3df41200c5","Type":"ContainerDied","Data":"d8b134364b1c2f65ce4dff435a683263fecad45c20df1dc1e4019c6ca9f7561c"} Dec 11 10:57:23 crc kubenswrapper[5016]: I1211 10:57:23.358590 5016 scope.go:117] "RemoveContainer" containerID="57b7ad75283d8bb53aaf969f78ec56f27beec9e99e52aaf26377aac252875b93" Dec 11 10:57:23 crc kubenswrapper[5016]: I1211 10:57:23.358681 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 10:57:23 crc kubenswrapper[5016]: I1211 10:57:23.358753 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-78bccb96bd-btt5f" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon" containerID="cri-o://ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887" gracePeriod=30 Dec 11 10:57:23 crc kubenswrapper[5016]: I1211 10:57:23.358667 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-78bccb96bd-btt5f" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon-log" containerID="cri-o://abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900" gracePeriod=30 Dec 11 10:57:23 crc kubenswrapper[5016]: I1211 10:57:23.425029 5016 scope.go:117] "RemoveContainer" containerID="ff8ae82f6bbc99792abf528b71021bb7957e55871e76f100c7a660d288f2d041" Dec 11 10:57:23 crc kubenswrapper[5016]: I1211 10:57:23.442867 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-pd9p5"] Dec 11 10:57:23 crc kubenswrapper[5016]: I1211 10:57:23.451233 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-pd9p5"] Dec 11 10:57:23 crc kubenswrapper[5016]: I1211 10:57:23.487560 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="224a0072-ebea-4ed2-9b57-af3df41200c5" path="/var/lib/kubelet/pods/224a0072-ebea-4ed2-9b57-af3df41200c5/volumes" Dec 11 10:57:24 crc kubenswrapper[5016]: I1211 10:57:24.372533 5016 generic.go:334] "Generic (PLEG): container finished" podID="97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" containerID="6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a" exitCode=0 Dec 11 10:57:24 crc kubenswrapper[5016]: I1211 10:57:24.372616 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b","Type":"ContainerDied","Data":"6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a"} Dec 11 10:57:26 crc kubenswrapper[5016]: I1211 10:57:26.185876 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:57:26 crc kubenswrapper[5016]: I1211 10:57:26.274678 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-676fd6784-tg4g7" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.348267 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.350560 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-589444b9f8-c7wwh" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.412161 5016 generic.go:334] "Generic (PLEG): container finished" podID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerID="ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887" exitCode=0 Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.412238 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78bccb96bd-btt5f" event={"ID":"6f611e53-2b48-4371-8673-dd02e7533a7d","Type":"ContainerDied","Data":"ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887"} Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.418025 5016 generic.go:334] "Generic (PLEG): container finished" podID="97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" containerID="66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5" exitCode=0 Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.418079 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b","Type":"ContainerDied","Data":"66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5"} Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.418113 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b","Type":"ContainerDied","Data":"70d071cd880ac2b641d8b499d064e4192e3377abe767a2d70dc3ceabb0c87458"} Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.418136 5016 scope.go:117] "RemoveContainer" containerID="6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.418311 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.452086 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sk7h4\" (UniqueName: \"kubernetes.io/projected/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-kube-api-access-sk7h4\") pod \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.452280 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-config-data\") pod \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.452305 5016 scope.go:117] "RemoveContainer" containerID="66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.452350 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-scripts\") pod \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.452390 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-config-data-custom\") pod \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.452431 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-combined-ca-bundle\") pod \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.452477 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-etc-machine-id\") pod \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\" (UID: \"97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b\") " Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.454263 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" (UID: "97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.462224 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" (UID: "97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.463997 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-scripts" (OuterVolumeSpecName: "scripts") pod "97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" (UID: "97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.467141 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-kube-api-access-sk7h4" (OuterVolumeSpecName: "kube-api-access-sk7h4") pod "97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" (UID: "97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b"). InnerVolumeSpecName "kube-api-access-sk7h4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.527782 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" (UID: "97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.533393 5016 scope.go:117] "RemoveContainer" containerID="6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a" Dec 11 10:57:27 crc kubenswrapper[5016]: E1211 10:57:27.533937 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a\": container with ID starting with 6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a not found: ID does not exist" containerID="6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.534043 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a"} err="failed to get container status \"6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a\": rpc error: code = NotFound desc = could not find container \"6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a\": container with ID starting with 6471ab6316b3f872cca2a99b62f366011037cd9936f3e6ef177a9d1219dfad5a not found: ID does not exist" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.534078 5016 scope.go:117] "RemoveContainer" containerID="66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5" Dec 11 10:57:27 crc kubenswrapper[5016]: E1211 10:57:27.534529 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5\": container with ID starting with 66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5 not found: ID does not exist" containerID="66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.534568 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5"} err="failed to get container status 
\"66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5\": rpc error: code = NotFound desc = could not find container \"66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5\": container with ID starting with 66415d788d3ec50e07668eaf2d8aaf13333cf998c0a4ab14bc02448809fcd4b5 not found: ID does not exist" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.557135 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.557758 5016 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.557836 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.557901 5016 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.557981 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sk7h4\" (UniqueName: \"kubernetes.io/projected/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-kube-api-access-sk7h4\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.593400 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-config-data" (OuterVolumeSpecName: "config-data") pod "97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" (UID: "97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.660251 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.762760 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.781825 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.796236 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 10:57:27 crc kubenswrapper[5016]: E1211 10:57:27.796912 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b4a7e5c-045f-434d-8744-60b045803cc3" containerName="neutron-httpd" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.796957 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b4a7e5c-045f-434d-8744-60b045803cc3" containerName="neutron-httpd" Dec 11 10:57:27 crc kubenswrapper[5016]: E1211 10:57:27.796978 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="224a0072-ebea-4ed2-9b57-af3df41200c5" containerName="init" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.796989 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="224a0072-ebea-4ed2-9b57-af3df41200c5" containerName="init" Dec 11 10:57:27 crc kubenswrapper[5016]: E1211 10:57:27.797018 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" containerName="cinder-scheduler" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.797024 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" containerName="cinder-scheduler" Dec 11 10:57:27 crc kubenswrapper[5016]: E1211 10:57:27.797037 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="224a0072-ebea-4ed2-9b57-af3df41200c5" containerName="dnsmasq-dns" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.797045 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="224a0072-ebea-4ed2-9b57-af3df41200c5" containerName="dnsmasq-dns" Dec 11 10:57:27 crc kubenswrapper[5016]: E1211 10:57:27.797060 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b4a7e5c-045f-434d-8744-60b045803cc3" containerName="neutron-api" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.797071 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b4a7e5c-045f-434d-8744-60b045803cc3" containerName="neutron-api" Dec 11 10:57:27 crc kubenswrapper[5016]: E1211 10:57:27.797087 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" containerName="probe" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.797095 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" containerName="probe" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.797374 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" containerName="probe" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.797405 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b4a7e5c-045f-434d-8744-60b045803cc3" containerName="neutron-api" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.797414 5016 
memory_manager.go:354] "RemoveStaleState removing state" podUID="97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" containerName="cinder-scheduler" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.797426 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b4a7e5c-045f-434d-8744-60b045803cc3" containerName="neutron-httpd" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.797453 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="224a0072-ebea-4ed2-9b57-af3df41200c5" containerName="dnsmasq-dns" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.799025 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.803682 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.808168 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.864415 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f265b9d-c475-455f-9fe7-05070efd4ec1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.864528 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0f265b9d-c475-455f-9fe7-05070efd4ec1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.864922 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f265b9d-c475-455f-9fe7-05070efd4ec1-scripts\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.865101 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f265b9d-c475-455f-9fe7-05070efd4ec1-config-data\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.865366 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f265b9d-c475-455f-9fe7-05070efd4ec1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.865400 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qwfd\" (UniqueName: \"kubernetes.io/projected/0f265b9d-c475-455f-9fe7-05070efd4ec1-kube-api-access-2qwfd\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.967664 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/0f265b9d-c475-455f-9fe7-05070efd4ec1-scripts\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.967762 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f265b9d-c475-455f-9fe7-05070efd4ec1-config-data\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.967822 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f265b9d-c475-455f-9fe7-05070efd4ec1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.967848 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qwfd\" (UniqueName: \"kubernetes.io/projected/0f265b9d-c475-455f-9fe7-05070efd4ec1-kube-api-access-2qwfd\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.967903 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f265b9d-c475-455f-9fe7-05070efd4ec1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.967928 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0f265b9d-c475-455f-9fe7-05070efd4ec1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.968072 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0f265b9d-c475-455f-9fe7-05070efd4ec1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.974099 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f265b9d-c475-455f-9fe7-05070efd4ec1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.974209 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f265b9d-c475-455f-9fe7-05070efd4ec1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.975226 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f265b9d-c475-455f-9fe7-05070efd4ec1-config-data\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.983481 
5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f265b9d-c475-455f-9fe7-05070efd4ec1-scripts\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:27 crc kubenswrapper[5016]: I1211 10:57:27.993541 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qwfd\" (UniqueName: \"kubernetes.io/projected/0f265b9d-c475-455f-9fe7-05070efd4ec1-kube-api-access-2qwfd\") pod \"cinder-scheduler-0\" (UID: \"0f265b9d-c475-455f-9fe7-05070efd4ec1\") " pod="openstack/cinder-scheduler-0" Dec 11 10:57:28 crc kubenswrapper[5016]: I1211 10:57:28.151199 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 11 10:57:28 crc kubenswrapper[5016]: I1211 10:57:28.322420 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-78bccb96bd-btt5f" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Dec 11 10:57:28 crc kubenswrapper[5016]: I1211 10:57:28.619856 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 11 10:57:29 crc kubenswrapper[5016]: I1211 10:57:29.065138 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Dec 11 10:57:29 crc kubenswrapper[5016]: I1211 10:57:29.451331 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0f265b9d-c475-455f-9fe7-05070efd4ec1","Type":"ContainerStarted","Data":"007c5faaf0ae04e31afd82044aae27310e87eb413c6b5c8c11e14baf1ac0e010"} Dec 11 10:57:29 crc kubenswrapper[5016]: I1211 10:57:29.451684 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0f265b9d-c475-455f-9fe7-05070efd4ec1","Type":"ContainerStarted","Data":"b84695cf25334fd5f9ab3f60b355aa600b2108638b8e02f860f46c2de3085328"} Dec 11 10:57:29 crc kubenswrapper[5016]: I1211 10:57:29.487858 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b" path="/var/lib/kubelet/pods/97a53b6e-c2fd-4f0b-97d6-cc8a33fdcc9b/volumes" Dec 11 10:57:30 crc kubenswrapper[5016]: I1211 10:57:30.485307 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0f265b9d-c475-455f-9fe7-05070efd4ec1","Type":"ContainerStarted","Data":"e8634b5197b20ab118a2e0a836f2d5a8a549fdcaab51f477c73470953bb33b32"} Dec 11 10:57:30 crc kubenswrapper[5016]: I1211 10:57:30.519543 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.519519156 podStartE2EDuration="3.519519156s" podCreationTimestamp="2025-12-11 10:57:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:57:30.509332081 +0000 UTC m=+1367.327891680" watchObservedRunningTime="2025-12-11 10:57:30.519519156 +0000 UTC m=+1367.338078735" Dec 11 10:57:30 crc kubenswrapper[5016]: I1211 10:57:30.922470 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Dec 11 10:57:30 crc kubenswrapper[5016]: I1211 10:57:30.924026 5016 util.go:30] "No sandbox for pod can be found. 
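
The block of reconciler_common.go and operation_generator.go entries above is the kubelet volume manager walking each declared volume of cinder-scheduler-0 through its three-step reconcile: VerifyControllerAttachedVolume (reconciler_common.go:245), MountVolume started (reconciler_common.go:218), and MountVolume.SetUp succeeded (operation_generator.go:637). A minimal sketch of the pod-spec side these entries act on, assuming recent k8s.io/api types; the config-data secret name comes from the reflector line above, while the scripts secret name is an assumption for illustration:

```go
// Sketch of the volume declarations the reconciler entries above are acting on.
// Only "cinder-scheduler-config-data" is confirmed by the log (reflector line);
// other names are assumptions, not the operator's actual manifest.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	volumes := []corev1.Volume{
		// Secret-backed volumes: VerifyControllerAttachedVolume -> MountVolume ->
		// SetUp materialises each one as a tmpfs mount inside the pod sandbox.
		{Name: "config-data", VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{SecretName: "cinder-scheduler-config-data"},
		}},
		{Name: "scripts", VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{SecretName: "cinder-scheduler-scripts"}, // name assumed
		}},
		// Host-path volume: SetUp has no secret data to project, which is why
		// etc-machine-id is the first "SetUp succeeded" entry in the log above.
		{Name: "etc-machine-id", VolumeSource: corev1.VolumeSource{
			HostPath: &corev1.HostPathVolumeSource{Path: "/etc/machine-id"},
		}},
	}
	for _, v := range volumes {
		fmt.Println("declared volume:", v.Name)
	}
}
```
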
Need to start a new one" pod="openstack/openstackclient" Dec 11 10:57:30 crc kubenswrapper[5016]: I1211 10:57:30.926663 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Dec 11 10:57:30 crc kubenswrapper[5016]: I1211 10:57:30.926774 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Dec 11 10:57:30 crc kubenswrapper[5016]: I1211 10:57:30.928675 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-9lpz2" Dec 11 10:57:30 crc kubenswrapper[5016]: I1211 10:57:30.941960 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.068117 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ab24d20-cfe0-4aeb-a0df-e0d0b245e863-combined-ca-bundle\") pod \"openstackclient\" (UID: \"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863\") " pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.068657 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5ab24d20-cfe0-4aeb-a0df-e0d0b245e863-openstack-config\") pod \"openstackclient\" (UID: \"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863\") " pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.068700 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5ab24d20-cfe0-4aeb-a0df-e0d0b245e863-openstack-config-secret\") pod \"openstackclient\" (UID: \"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863\") " pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.068753 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvgbb\" (UniqueName: \"kubernetes.io/projected/5ab24d20-cfe0-4aeb-a0df-e0d0b245e863-kube-api-access-hvgbb\") pod \"openstackclient\" (UID: \"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863\") " pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.170670 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ab24d20-cfe0-4aeb-a0df-e0d0b245e863-combined-ca-bundle\") pod \"openstackclient\" (UID: \"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863\") " pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.170752 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5ab24d20-cfe0-4aeb-a0df-e0d0b245e863-openstack-config\") pod \"openstackclient\" (UID: \"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863\") " pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.170789 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5ab24d20-cfe0-4aeb-a0df-e0d0b245e863-openstack-config-secret\") pod \"openstackclient\" (UID: \"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863\") " pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.170823 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-hvgbb\" (UniqueName: \"kubernetes.io/projected/5ab24d20-cfe0-4aeb-a0df-e0d0b245e863-kube-api-access-hvgbb\") pod \"openstackclient\" (UID: \"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863\") " pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.172927 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5ab24d20-cfe0-4aeb-a0df-e0d0b245e863-openstack-config\") pod \"openstackclient\" (UID: \"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863\") " pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.188887 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ab24d20-cfe0-4aeb-a0df-e0d0b245e863-combined-ca-bundle\") pod \"openstackclient\" (UID: \"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863\") " pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.199701 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5ab24d20-cfe0-4aeb-a0df-e0d0b245e863-openstack-config-secret\") pod \"openstackclient\" (UID: \"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863\") " pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.214825 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvgbb\" (UniqueName: \"kubernetes.io/projected/5ab24d20-cfe0-4aeb-a0df-e0d0b245e863-kube-api-access-hvgbb\") pod \"openstackclient\" (UID: \"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863\") " pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.254012 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 11 10:57:31 crc kubenswrapper[5016]: I1211 10:57:31.735743 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 11 10:57:31 crc kubenswrapper[5016]: W1211 10:57:31.747660 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5ab24d20_cfe0_4aeb_a0df_e0d0b245e863.slice/crio-0594943df0f7254db62feac27bf0486ac03c5ff4b4dac6069e390913b87fe4dd WatchSource:0}: Error finding container 0594943df0f7254db62feac27bf0486ac03c5ff4b4dac6069e390913b87fe4dd: Status 404 returned error can't find the container with id 0594943df0f7254db62feac27bf0486ac03c5ff4b4dac6069e390913b87fe4dd Dec 11 10:57:32 crc kubenswrapper[5016]: I1211 10:57:32.517404 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863","Type":"ContainerStarted","Data":"0594943df0f7254db62feac27bf0486ac03c5ff4b4dac6069e390913b87fe4dd"} Dec 11 10:57:33 crc kubenswrapper[5016]: I1211 10:57:33.155474 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.310443 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-58f98f7fd9-rtbw4"] Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.312539 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.316192 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.316922 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.317045 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.322410 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-58f98f7fd9-rtbw4"] Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.471218 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2816d686-f2da-4306-9b07-b27dc9eb88f5-internal-tls-certs\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.471285 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2816d686-f2da-4306-9b07-b27dc9eb88f5-run-httpd\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.471315 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2816d686-f2da-4306-9b07-b27dc9eb88f5-config-data\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.471340 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2816d686-f2da-4306-9b07-b27dc9eb88f5-etc-swift\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.471411 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2816d686-f2da-4306-9b07-b27dc9eb88f5-combined-ca-bundle\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.471564 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2816d686-f2da-4306-9b07-b27dc9eb88f5-public-tls-certs\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.471693 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mw2v\" (UniqueName: \"kubernetes.io/projected/2816d686-f2da-4306-9b07-b27dc9eb88f5-kube-api-access-6mw2v\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " 
pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.471972 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2816d686-f2da-4306-9b07-b27dc9eb88f5-log-httpd\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.573500 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2816d686-f2da-4306-9b07-b27dc9eb88f5-combined-ca-bundle\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.573558 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2816d686-f2da-4306-9b07-b27dc9eb88f5-public-tls-certs\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.573594 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mw2v\" (UniqueName: \"kubernetes.io/projected/2816d686-f2da-4306-9b07-b27dc9eb88f5-kube-api-access-6mw2v\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.573667 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2816d686-f2da-4306-9b07-b27dc9eb88f5-log-httpd\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.573715 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2816d686-f2da-4306-9b07-b27dc9eb88f5-internal-tls-certs\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.573737 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2816d686-f2da-4306-9b07-b27dc9eb88f5-run-httpd\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.573765 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2816d686-f2da-4306-9b07-b27dc9eb88f5-config-data\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.573796 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2816d686-f2da-4306-9b07-b27dc9eb88f5-etc-swift\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 
10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.574655 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2816d686-f2da-4306-9b07-b27dc9eb88f5-run-httpd\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.575327 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2816d686-f2da-4306-9b07-b27dc9eb88f5-log-httpd\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.585010 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2816d686-f2da-4306-9b07-b27dc9eb88f5-combined-ca-bundle\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.588569 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2816d686-f2da-4306-9b07-b27dc9eb88f5-config-data\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.591815 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2816d686-f2da-4306-9b07-b27dc9eb88f5-etc-swift\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.592191 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2816d686-f2da-4306-9b07-b27dc9eb88f5-public-tls-certs\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.593469 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2816d686-f2da-4306-9b07-b27dc9eb88f5-internal-tls-certs\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.597089 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mw2v\" (UniqueName: \"kubernetes.io/projected/2816d686-f2da-4306-9b07-b27dc9eb88f5-kube-api-access-6mw2v\") pod \"swift-proxy-58f98f7fd9-rtbw4\" (UID: \"2816d686-f2da-4306-9b07-b27dc9eb88f5\") " pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:35 crc kubenswrapper[5016]: I1211 10:57:35.668816 5016 util.go:30] "No sandbox for pod can be found. 
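
Several probe mechanisms report in this window: horizon's readiness probe fails with connection refused at 10:57:28 and again at 10:57:38 (a clean 10s period), cinder-scheduler-0's startup probe goes unhealthy at 10:57:33 and flips back to started at 10:57:38 further down, and swift-proxy's readiness transitions appear once its containers start. A pod-spec-side sketch of the failing horizon probe, with path, port, and scheme copied from the probe URL in the log; period and threshold are illustrative assumptions, and the ProbeHandler layout assumes recent k8s.io/api:

```go
// Sketch of the readiness probe whose failures appear in these entries.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	readiness := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path:   "/dashboard/auth/login/?next=/dashboard/",
				Port:   intstr.FromInt(8443),
				Scheme: corev1.URISchemeHTTPS,
			},
		},
		PeriodSeconds:    10, // matches the ~10s spacing of the horizon failures
		FailureThreshold: 3,  // assumed
	}
	// A failing readiness probe only removes the pod from service endpoints;
	// unlike a liveness probe it never restarts the container, which is why
	// horizon keeps logging failures with no ContainerDied event in between.
	fmt.Printf("readiness: %+v\n", readiness.ProbeHandler.HTTPGet)
}
```
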
Need to start a new one" pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:36 crc kubenswrapper[5016]: W1211 10:57:36.257924 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2816d686_f2da_4306_9b07_b27dc9eb88f5.slice/crio-8f9a8dbeca07deb1f601812526f3abc61fb5a0d14a906d625d89ed9dd96de3c8 WatchSource:0}: Error finding container 8f9a8dbeca07deb1f601812526f3abc61fb5a0d14a906d625d89ed9dd96de3c8: Status 404 returned error can't find the container with id 8f9a8dbeca07deb1f601812526f3abc61fb5a0d14a906d625d89ed9dd96de3c8 Dec 11 10:57:36 crc kubenswrapper[5016]: I1211 10:57:36.258347 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-58f98f7fd9-rtbw4"] Dec 11 10:57:36 crc kubenswrapper[5016]: I1211 10:57:36.561578 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-58f98f7fd9-rtbw4" event={"ID":"2816d686-f2da-4306-9b07-b27dc9eb88f5","Type":"ContainerStarted","Data":"38343dd6f1fcdf0615db171bf4a024f10e1dab3628d974869835e935e1c44ea6"} Dec 11 10:57:36 crc kubenswrapper[5016]: I1211 10:57:36.561976 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-58f98f7fd9-rtbw4" event={"ID":"2816d686-f2da-4306-9b07-b27dc9eb88f5","Type":"ContainerStarted","Data":"8f9a8dbeca07deb1f601812526f3abc61fb5a0d14a906d625d89ed9dd96de3c8"} Dec 11 10:57:36 crc kubenswrapper[5016]: I1211 10:57:36.835735 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:57:36 crc kubenswrapper[5016]: I1211 10:57:36.836296 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="ceilometer-central-agent" containerID="cri-o://d7d97a58f39c8cc6305c4b3e468721854224785535decd54547ea267ef281951" gracePeriod=30 Dec 11 10:57:36 crc kubenswrapper[5016]: I1211 10:57:36.836445 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="proxy-httpd" containerID="cri-o://82e929c0de411a73ee6f81bf7c3079d6bca91bd80d03316c5158e602327bfa6e" gracePeriod=30 Dec 11 10:57:36 crc kubenswrapper[5016]: I1211 10:57:36.836507 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="sg-core" containerID="cri-o://9409c8b825ecaa3956452afd3023535eaadcb4765df8db9420dd3c8fcf783c14" gracePeriod=30 Dec 11 10:57:36 crc kubenswrapper[5016]: I1211 10:57:36.836558 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="ceilometer-notification-agent" containerID="cri-o://9d1b4a05f1389775128fcbbceb131df2c8eac3e95800a2adc09bd6c8934d0305" gracePeriod=30 Dec 11 10:57:36 crc kubenswrapper[5016]: I1211 10:57:36.846418 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.163:3000/\": EOF" Dec 11 10:57:37 crc kubenswrapper[5016]: I1211 10:57:37.575349 5016 generic.go:334] "Generic (PLEG): container finished" podID="53bba674-9897-4df3-8898-bfba40d352f2" containerID="82e929c0de411a73ee6f81bf7c3079d6bca91bd80d03316c5158e602327bfa6e" exitCode=0 Dec 11 10:57:37 crc kubenswrapper[5016]: I1211 
10:57:37.575649 5016 generic.go:334] "Generic (PLEG): container finished" podID="53bba674-9897-4df3-8898-bfba40d352f2" containerID="9409c8b825ecaa3956452afd3023535eaadcb4765df8db9420dd3c8fcf783c14" exitCode=2 Dec 11 10:57:37 crc kubenswrapper[5016]: I1211 10:57:37.575660 5016 generic.go:334] "Generic (PLEG): container finished" podID="53bba674-9897-4df3-8898-bfba40d352f2" containerID="d7d97a58f39c8cc6305c4b3e468721854224785535decd54547ea267ef281951" exitCode=0 Dec 11 10:57:37 crc kubenswrapper[5016]: I1211 10:57:37.575449 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"53bba674-9897-4df3-8898-bfba40d352f2","Type":"ContainerDied","Data":"82e929c0de411a73ee6f81bf7c3079d6bca91bd80d03316c5158e602327bfa6e"} Dec 11 10:57:37 crc kubenswrapper[5016]: I1211 10:57:37.575723 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"53bba674-9897-4df3-8898-bfba40d352f2","Type":"ContainerDied","Data":"9409c8b825ecaa3956452afd3023535eaadcb4765df8db9420dd3c8fcf783c14"} Dec 11 10:57:37 crc kubenswrapper[5016]: I1211 10:57:37.575740 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"53bba674-9897-4df3-8898-bfba40d352f2","Type":"ContainerDied","Data":"d7d97a58f39c8cc6305c4b3e468721854224785535decd54547ea267ef281951"} Dec 11 10:57:37 crc kubenswrapper[5016]: I1211 10:57:37.580095 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-58f98f7fd9-rtbw4" event={"ID":"2816d686-f2da-4306-9b07-b27dc9eb88f5","Type":"ContainerStarted","Data":"4f37489fbedb862204a0f284e2098dffd1b4d99e901153e2d1e168f9dfbf2deb"} Dec 11 10:57:37 crc kubenswrapper[5016]: I1211 10:57:37.580406 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:37 crc kubenswrapper[5016]: I1211 10:57:37.580461 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:37 crc kubenswrapper[5016]: I1211 10:57:37.611127 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-58f98f7fd9-rtbw4" podStartSLOduration=2.611097516 podStartE2EDuration="2.611097516s" podCreationTimestamp="2025-12-11 10:57:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:57:37.597850257 +0000 UTC m=+1374.416409856" watchObservedRunningTime="2025-12-11 10:57:37.611097516 +0000 UTC m=+1374.429657105" Dec 11 10:57:38 crc kubenswrapper[5016]: I1211 10:57:38.320090 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-78bccb96bd-btt5f" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Dec 11 10:57:38 crc kubenswrapper[5016]: I1211 10:57:38.513860 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 11 10:57:41 crc kubenswrapper[5016]: I1211 10:57:41.640211 5016 generic.go:334] "Generic (PLEG): container finished" podID="53bba674-9897-4df3-8898-bfba40d352f2" containerID="9d1b4a05f1389775128fcbbceb131df2c8eac3e95800a2adc09bd6c8934d0305" exitCode=0 Dec 11 10:57:41 crc kubenswrapper[5016]: I1211 10:57:41.640290 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"53bba674-9897-4df3-8898-bfba40d352f2","Type":"ContainerDied","Data":"9d1b4a05f1389775128fcbbceb131df2c8eac3e95800a2adc09bd6c8934d0305"} Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.513846 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-n4qm6"] Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.521009 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-n4qm6" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.535264 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-n4qm6"] Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.570832 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zktw\" (UniqueName: \"kubernetes.io/projected/c8a2de44-054b-4a31-8d4e-d88d349d59f5-kube-api-access-7zktw\") pod \"nova-api-db-create-n4qm6\" (UID: \"c8a2de44-054b-4a31-8d4e-d88d349d59f5\") " pod="openstack/nova-api-db-create-n4qm6" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.571039 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8a2de44-054b-4a31-8d4e-d88d349d59f5-operator-scripts\") pod \"nova-api-db-create-n4qm6\" (UID: \"c8a2de44-054b-4a31-8d4e-d88d349d59f5\") " pod="openstack/nova-api-db-create-n4qm6" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.617100 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-r4v66"] Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.618535 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-r4v66" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.633049 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-r4v66"] Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.679978 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5xmk\" (UniqueName: \"kubernetes.io/projected/ddbcb259-d1f7-4de6-b255-114890395ec8-kube-api-access-m5xmk\") pod \"nova-cell0-db-create-r4v66\" (UID: \"ddbcb259-d1f7-4de6-b255-114890395ec8\") " pod="openstack/nova-cell0-db-create-r4v66" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.680037 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddbcb259-d1f7-4de6-b255-114890395ec8-operator-scripts\") pod \"nova-cell0-db-create-r4v66\" (UID: \"ddbcb259-d1f7-4de6-b255-114890395ec8\") " pod="openstack/nova-cell0-db-create-r4v66" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.680082 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8a2de44-054b-4a31-8d4e-d88d349d59f5-operator-scripts\") pod \"nova-api-db-create-n4qm6\" (UID: \"c8a2de44-054b-4a31-8d4e-d88d349d59f5\") " pod="openstack/nova-api-db-create-n4qm6" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.680221 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zktw\" (UniqueName: \"kubernetes.io/projected/c8a2de44-054b-4a31-8d4e-d88d349d59f5-kube-api-access-7zktw\") pod \"nova-api-db-create-n4qm6\" (UID: 
\"c8a2de44-054b-4a31-8d4e-d88d349d59f5\") " pod="openstack/nova-api-db-create-n4qm6" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.697014 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8a2de44-054b-4a31-8d4e-d88d349d59f5-operator-scripts\") pod \"nova-api-db-create-n4qm6\" (UID: \"c8a2de44-054b-4a31-8d4e-d88d349d59f5\") " pod="openstack/nova-api-db-create-n4qm6" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.716705 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zktw\" (UniqueName: \"kubernetes.io/projected/c8a2de44-054b-4a31-8d4e-d88d349d59f5-kube-api-access-7zktw\") pod \"nova-api-db-create-n4qm6\" (UID: \"c8a2de44-054b-4a31-8d4e-d88d349d59f5\") " pod="openstack/nova-api-db-create-n4qm6" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.725233 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-2d6c-account-create-update-6j78f"] Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.726712 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2d6c-account-create-update-6j78f" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.729011 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.744881 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2d6c-account-create-update-6j78f"] Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.781272 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm6jk\" (UniqueName: \"kubernetes.io/projected/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75-kube-api-access-xm6jk\") pod \"nova-api-2d6c-account-create-update-6j78f\" (UID: \"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75\") " pod="openstack/nova-api-2d6c-account-create-update-6j78f" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.781331 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5xmk\" (UniqueName: \"kubernetes.io/projected/ddbcb259-d1f7-4de6-b255-114890395ec8-kube-api-access-m5xmk\") pod \"nova-cell0-db-create-r4v66\" (UID: \"ddbcb259-d1f7-4de6-b255-114890395ec8\") " pod="openstack/nova-cell0-db-create-r4v66" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.781359 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddbcb259-d1f7-4de6-b255-114890395ec8-operator-scripts\") pod \"nova-cell0-db-create-r4v66\" (UID: \"ddbcb259-d1f7-4de6-b255-114890395ec8\") " pod="openstack/nova-cell0-db-create-r4v66" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.781396 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75-operator-scripts\") pod \"nova-api-2d6c-account-create-update-6j78f\" (UID: \"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75\") " pod="openstack/nova-api-2d6c-account-create-update-6j78f" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.783922 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddbcb259-d1f7-4de6-b255-114890395ec8-operator-scripts\") pod \"nova-cell0-db-create-r4v66\" (UID: 
\"ddbcb259-d1f7-4de6-b255-114890395ec8\") " pod="openstack/nova-cell0-db-create-r4v66" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.816478 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5xmk\" (UniqueName: \"kubernetes.io/projected/ddbcb259-d1f7-4de6-b255-114890395ec8-kube-api-access-m5xmk\") pod \"nova-cell0-db-create-r4v66\" (UID: \"ddbcb259-d1f7-4de6-b255-114890395ec8\") " pod="openstack/nova-cell0-db-create-r4v66" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.817004 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-n667p"] Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.818519 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-n667p" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.838403 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-n667p"] Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.883238 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-n4qm6" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.886349 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8jbg\" (UniqueName: \"kubernetes.io/projected/2c350656-c130-4eef-8c2f-be3f74dc25f4-kube-api-access-f8jbg\") pod \"nova-cell1-db-create-n667p\" (UID: \"2c350656-c130-4eef-8c2f-be3f74dc25f4\") " pod="openstack/nova-cell1-db-create-n667p" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.886414 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm6jk\" (UniqueName: \"kubernetes.io/projected/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75-kube-api-access-xm6jk\") pod \"nova-api-2d6c-account-create-update-6j78f\" (UID: \"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75\") " pod="openstack/nova-api-2d6c-account-create-update-6j78f" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.886436 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c350656-c130-4eef-8c2f-be3f74dc25f4-operator-scripts\") pod \"nova-cell1-db-create-n667p\" (UID: \"2c350656-c130-4eef-8c2f-be3f74dc25f4\") " pod="openstack/nova-cell1-db-create-n667p" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.886487 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75-operator-scripts\") pod \"nova-api-2d6c-account-create-update-6j78f\" (UID: \"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75\") " pod="openstack/nova-api-2d6c-account-create-update-6j78f" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.890241 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75-operator-scripts\") pod \"nova-api-2d6c-account-create-update-6j78f\" (UID: \"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75\") " pod="openstack/nova-api-2d6c-account-create-update-6j78f" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.917969 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm6jk\" (UniqueName: \"kubernetes.io/projected/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75-kube-api-access-xm6jk\") pod 
\"nova-api-2d6c-account-create-update-6j78f\" (UID: \"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75\") " pod="openstack/nova-api-2d6c-account-create-update-6j78f" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.925690 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-57f7-account-create-update-8rkld"] Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.927077 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-57f7-account-create-update-8rkld" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.929915 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.941706 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-r4v66" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.961655 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-57f7-account-create-update-8rkld"] Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.992439 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kr7d4\" (UniqueName: \"kubernetes.io/projected/41bed616-8e34-49a0-9ade-3b17f7988491-kube-api-access-kr7d4\") pod \"nova-cell0-57f7-account-create-update-8rkld\" (UID: \"41bed616-8e34-49a0-9ade-3b17f7988491\") " pod="openstack/nova-cell0-57f7-account-create-update-8rkld" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.992878 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c350656-c130-4eef-8c2f-be3f74dc25f4-operator-scripts\") pod \"nova-cell1-db-create-n667p\" (UID: \"2c350656-c130-4eef-8c2f-be3f74dc25f4\") " pod="openstack/nova-cell1-db-create-n667p" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.993181 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41bed616-8e34-49a0-9ade-3b17f7988491-operator-scripts\") pod \"nova-cell0-57f7-account-create-update-8rkld\" (UID: \"41bed616-8e34-49a0-9ade-3b17f7988491\") " pod="openstack/nova-cell0-57f7-account-create-update-8rkld" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.993483 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8jbg\" (UniqueName: \"kubernetes.io/projected/2c350656-c130-4eef-8c2f-be3f74dc25f4-kube-api-access-f8jbg\") pod \"nova-cell1-db-create-n667p\" (UID: \"2c350656-c130-4eef-8c2f-be3f74dc25f4\") " pod="openstack/nova-cell1-db-create-n667p" Dec 11 10:57:43 crc kubenswrapper[5016]: I1211 10:57:43.994150 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c350656-c130-4eef-8c2f-be3f74dc25f4-operator-scripts\") pod \"nova-cell1-db-create-n667p\" (UID: \"2c350656-c130-4eef-8c2f-be3f74dc25f4\") " pod="openstack/nova-cell1-db-create-n667p" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.015022 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8jbg\" (UniqueName: \"kubernetes.io/projected/2c350656-c130-4eef-8c2f-be3f74dc25f4-kube-api-access-f8jbg\") pod \"nova-cell1-db-create-n667p\" (UID: \"2c350656-c130-4eef-8c2f-be3f74dc25f4\") " pod="openstack/nova-cell1-db-create-n667p" Dec 11 10:57:44 crc kubenswrapper[5016]: 
I1211 10:57:44.095114 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41bed616-8e34-49a0-9ade-3b17f7988491-operator-scripts\") pod \"nova-cell0-57f7-account-create-update-8rkld\" (UID: \"41bed616-8e34-49a0-9ade-3b17f7988491\") " pod="openstack/nova-cell0-57f7-account-create-update-8rkld" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.095344 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kr7d4\" (UniqueName: \"kubernetes.io/projected/41bed616-8e34-49a0-9ade-3b17f7988491-kube-api-access-kr7d4\") pod \"nova-cell0-57f7-account-create-update-8rkld\" (UID: \"41bed616-8e34-49a0-9ade-3b17f7988491\") " pod="openstack/nova-cell0-57f7-account-create-update-8rkld" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.096182 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41bed616-8e34-49a0-9ade-3b17f7988491-operator-scripts\") pod \"nova-cell0-57f7-account-create-update-8rkld\" (UID: \"41bed616-8e34-49a0-9ade-3b17f7988491\") " pod="openstack/nova-cell0-57f7-account-create-update-8rkld" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.096337 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2d6c-account-create-update-6j78f" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.117520 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-edf2-account-create-update-zn58v"] Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.120982 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kr7d4\" (UniqueName: \"kubernetes.io/projected/41bed616-8e34-49a0-9ade-3b17f7988491-kube-api-access-kr7d4\") pod \"nova-cell0-57f7-account-create-update-8rkld\" (UID: \"41bed616-8e34-49a0-9ade-3b17f7988491\") " pod="openstack/nova-cell0-57f7-account-create-update-8rkld" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.132722 5016 util.go:30] "No sandbox for pod can be found. 
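
The nova db-create and account-create pods above each carry the minimal wiring for a short-lived provisioning job: one operator-scripts volume backed by a ConfigMap plus the injected kube-api-access token volume. A sketch of the ConfigMap volume, where the ConfigMap name and the 0755 file mode are illustrative assumptions (the log only shows the volume name and pod UID):

```go
// Assumed shape of the ConfigMap-backed "operator-scripts" volume mounted by
// the nova-*-db-create and nova-*-account-create-update pods above.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	mode := int32(0755) // scripts must be executable once projected; value assumed
	vol := corev1.Volume{
		Name: "operator-scripts",
		VolumeSource: corev1.VolumeSource{
			ConfigMap: &corev1.ConfigMapVolumeSource{
				LocalObjectReference: corev1.LocalObjectReference{Name: "nova-api-db-create-scripts"}, // assumed name
				DefaultMode:          &mode,
			},
		},
	}
	fmt.Println("volume:", vol.Name)
}
```
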
Need to start a new one" pod="openstack/nova-cell1-edf2-account-create-update-zn58v" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.139297 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.172316 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-edf2-account-create-update-zn58v"] Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.198144 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd14602e-defa-4080-a30b-fa7736df6746-operator-scripts\") pod \"nova-cell1-edf2-account-create-update-zn58v\" (UID: \"fd14602e-defa-4080-a30b-fa7736df6746\") " pod="openstack/nova-cell1-edf2-account-create-update-zn58v" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.198401 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnpfd\" (UniqueName: \"kubernetes.io/projected/fd14602e-defa-4080-a30b-fa7736df6746-kube-api-access-xnpfd\") pod \"nova-cell1-edf2-account-create-update-zn58v\" (UID: \"fd14602e-defa-4080-a30b-fa7736df6746\") " pod="openstack/nova-cell1-edf2-account-create-update-zn58v" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.199478 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-n667p" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.293873 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-57f7-account-create-update-8rkld" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.300112 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd14602e-defa-4080-a30b-fa7736df6746-operator-scripts\") pod \"nova-cell1-edf2-account-create-update-zn58v\" (UID: \"fd14602e-defa-4080-a30b-fa7736df6746\") " pod="openstack/nova-cell1-edf2-account-create-update-zn58v" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.300230 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnpfd\" (UniqueName: \"kubernetes.io/projected/fd14602e-defa-4080-a30b-fa7736df6746-kube-api-access-xnpfd\") pod \"nova-cell1-edf2-account-create-update-zn58v\" (UID: \"fd14602e-defa-4080-a30b-fa7736df6746\") " pod="openstack/nova-cell1-edf2-account-create-update-zn58v" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.301006 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd14602e-defa-4080-a30b-fa7736df6746-operator-scripts\") pod \"nova-cell1-edf2-account-create-update-zn58v\" (UID: \"fd14602e-defa-4080-a30b-fa7736df6746\") " pod="openstack/nova-cell1-edf2-account-create-update-zn58v" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.327446 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnpfd\" (UniqueName: \"kubernetes.io/projected/fd14602e-defa-4080-a30b-fa7736df6746-kube-api-access-xnpfd\") pod \"nova-cell1-edf2-account-create-update-zn58v\" (UID: \"fd14602e-defa-4080-a30b-fa7736df6746\") " pod="openstack/nova-cell1-edf2-account-create-update-zn58v" Dec 11 10:57:44 crc kubenswrapper[5016]: I1211 10:57:44.519076 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-edf2-account-create-update-zn58v" Dec 11 10:57:45 crc kubenswrapper[5016]: I1211 10:57:45.488413 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 10:57:45 crc kubenswrapper[5016]: I1211 10:57:45.489665 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" containerName="glance-log" containerID="cri-o://5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec" gracePeriod=30 Dec 11 10:57:45 crc kubenswrapper[5016]: I1211 10:57:45.489820 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" containerName="glance-httpd" containerID="cri-o://12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5" gracePeriod=30 Dec 11 10:57:45 crc kubenswrapper[5016]: I1211 10:57:45.681432 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:45 crc kubenswrapper[5016]: I1211 10:57:45.686880 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-58f98f7fd9-rtbw4" Dec 11 10:57:45 crc kubenswrapper[5016]: I1211 10:57:45.690117 5016 generic.go:334] "Generic (PLEG): container finished" podID="bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" containerID="5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec" exitCode=143 Dec 11 10:57:45 crc kubenswrapper[5016]: I1211 10:57:45.690179 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057","Type":"ContainerDied","Data":"5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec"} Dec 11 10:57:46 crc kubenswrapper[5016]: I1211 10:57:46.700988 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.163:3000/\": dial tcp 10.217.0.163:3000: connect: connection refused" Dec 11 10:57:46 crc kubenswrapper[5016]: I1211 10:57:46.749322 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:57:46 crc kubenswrapper[5016]: I1211 10:57:46.749595 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8542c44c-4c37-431f-a2b1-7ff93d36f4d0" containerName="glance-log" containerID="cri-o://1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb" gracePeriod=30 Dec 11 10:57:46 crc kubenswrapper[5016]: I1211 10:57:46.749826 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8542c44c-4c37-431f-a2b1-7ff93d36f4d0" containerName="glance-httpd" containerID="cri-o://5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d" gracePeriod=30 Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.171979 5016 util.go:48] "No ready sandbox for pod can be found. 
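
The glance deletions follow the standard graceful-termination path: a SyncLoop DELETE from the API is translated into per-container kills with gracePeriod=30, the kubelet sends SIGTERM, and glance-log's exitCode=143 (128+15, i.e. SIGTERM) confirms it stopped inside the grace window rather than being SIGKILLed. A minimal client-go sketch of the API-side delete that starts this sequence; the in-cluster config and the absence of error handling are simplifications for illustration:

```go
// Minimal client-go sketch of the delete call that produces the
// "SyncLoop DELETE" -> "Killing container with a grace period" entries above.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	grace := int64(30) // the gracePeriod=30 kubelet reports for each container
	err = client.CoreV1().Pods("openstack").Delete(context.TODO(),
		"glance-default-external-api-0",
		metav1.DeleteOptions{GracePeriodSeconds: &grace})
	fmt.Println("delete requested:", err)
	// kubelet sends SIGTERM first and escalates to SIGKILL only after the
	// grace period; exitCode=143 above shows SIGTERM was enough here.
}
```
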
Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.285286 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-sg-core-conf-yaml\") pod \"53bba674-9897-4df3-8898-bfba40d352f2\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") "
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.285390 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-config-data\") pod \"53bba674-9897-4df3-8898-bfba40d352f2\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") "
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.285413 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-combined-ca-bundle\") pod \"53bba674-9897-4df3-8898-bfba40d352f2\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") "
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.285474 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/53bba674-9897-4df3-8898-bfba40d352f2-run-httpd\") pod \"53bba674-9897-4df3-8898-bfba40d352f2\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") "
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.285533 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6ksj\" (UniqueName: \"kubernetes.io/projected/53bba674-9897-4df3-8898-bfba40d352f2-kube-api-access-g6ksj\") pod \"53bba674-9897-4df3-8898-bfba40d352f2\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") "
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.285666 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/53bba674-9897-4df3-8898-bfba40d352f2-log-httpd\") pod \"53bba674-9897-4df3-8898-bfba40d352f2\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") "
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.285718 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-scripts\") pod \"53bba674-9897-4df3-8898-bfba40d352f2\" (UID: \"53bba674-9897-4df3-8898-bfba40d352f2\") "
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.296391 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53bba674-9897-4df3-8898-bfba40d352f2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "53bba674-9897-4df3-8898-bfba40d352f2" (UID: "53bba674-9897-4df3-8898-bfba40d352f2"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.328341 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53bba674-9897-4df3-8898-bfba40d352f2-kube-api-access-g6ksj" (OuterVolumeSpecName: "kube-api-access-g6ksj") pod "53bba674-9897-4df3-8898-bfba40d352f2" (UID: "53bba674-9897-4df3-8898-bfba40d352f2"). InnerVolumeSpecName "kube-api-access-g6ksj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.339311 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53bba674-9897-4df3-8898-bfba40d352f2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "53bba674-9897-4df3-8898-bfba40d352f2" (UID: "53bba674-9897-4df3-8898-bfba40d352f2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.339849 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-scripts" (OuterVolumeSpecName: "scripts") pod "53bba674-9897-4df3-8898-bfba40d352f2" (UID: "53bba674-9897-4df3-8898-bfba40d352f2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.380827 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "53bba674-9897-4df3-8898-bfba40d352f2" (UID: "53bba674-9897-4df3-8898-bfba40d352f2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.390568 5016 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/53bba674-9897-4df3-8898-bfba40d352f2-log-httpd\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.391097 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.391114 5016 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.391130 5016 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/53bba674-9897-4df3-8898-bfba40d352f2-run-httpd\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.391143 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6ksj\" (UniqueName: \"kubernetes.io/projected/53bba674-9897-4df3-8898-bfba40d352f2-kube-api-access-g6ksj\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.612208 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-n4qm6"]
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.615994 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-config-data" (OuterVolumeSpecName: "config-data") pod "53bba674-9897-4df3-8898-bfba40d352f2" (UID: "53bba674-9897-4df3-8898-bfba40d352f2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.638813 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.663705 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "53bba674-9897-4df3-8898-bfba40d352f2" (UID: "53bba674-9897-4df3-8898-bfba40d352f2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.725735 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-n4qm6" event={"ID":"c8a2de44-054b-4a31-8d4e-d88d349d59f5","Type":"ContainerStarted","Data":"b109e3c9442031b790e539909b4c40101f8d4cbe1d8473c494b4eeafecd94eae"}
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.733196 5016 generic.go:334] "Generic (PLEG): container finished" podID="8542c44c-4c37-431f-a2b1-7ff93d36f4d0" containerID="1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb" exitCode=143
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.733267 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8542c44c-4c37-431f-a2b1-7ff93d36f4d0","Type":"ContainerDied","Data":"1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb"}
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.735111 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"5ab24d20-cfe0-4aeb-a0df-e0d0b245e863","Type":"ContainerStarted","Data":"2c541af2276e530ac206001408f6932f514976189ee4aab92724678b5a0e5a90"}
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.740289 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53bba674-9897-4df3-8898-bfba40d352f2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.741091 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"53bba674-9897-4df3-8898-bfba40d352f2","Type":"ContainerDied","Data":"8250ad5bff45a2896a193d47ff73cba46914d7034e4cf1ad7d51e2597ddb44b2"}
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.741181 5016 scope.go:117] "RemoveContainer" containerID="82e929c0de411a73ee6f81bf7c3079d6bca91bd80d03316c5158e602327bfa6e"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.741208 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.767583 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-57f7-account-create-update-8rkld"]
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.782100 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.641746553 podStartE2EDuration="17.782073084s" podCreationTimestamp="2025-12-11 10:57:30 +0000 UTC" firstStartedPulling="2025-12-11 10:57:31.750321857 +0000 UTC m=+1368.568881436" lastFinishedPulling="2025-12-11 10:57:46.890648388 +0000 UTC m=+1383.709207967" observedRunningTime="2025-12-11 10:57:47.760321859 +0000 UTC m=+1384.578881448" watchObservedRunningTime="2025-12-11 10:57:47.782073084 +0000 UTC m=+1384.600632663"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.864046 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-edf2-account-create-update-zn58v"]
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.875279 5016 scope.go:117] "RemoveContainer" containerID="9409c8b825ecaa3956452afd3023535eaadcb4765df8db9420dd3c8fcf783c14"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.885519 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.906134 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.938165 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Dec 11 10:57:47 crc kubenswrapper[5016]: E1211 10:57:47.951650 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="ceilometer-notification-agent"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.951715 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="ceilometer-notification-agent"
Dec 11 10:57:47 crc kubenswrapper[5016]: E1211 10:57:47.951730 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="proxy-httpd"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.951739 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="proxy-httpd"
Dec 11 10:57:47 crc kubenswrapper[5016]: E1211 10:57:47.951773 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="ceilometer-central-agent"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.951782 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="ceilometer-central-agent"
Dec 11 10:57:47 crc kubenswrapper[5016]: E1211 10:57:47.951804 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="sg-core"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.951812 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="sg-core"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.952183 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="sg-core"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.952207 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="ceilometer-notification-agent"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.952225 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="proxy-httpd"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.952245 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="53bba674-9897-4df3-8898-bfba40d352f2" containerName="ceilometer-central-agent"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.954892 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.969434 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.973730 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 11 10:57:47 crc kubenswrapper[5016]: I1211 10:57:47.990178 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.012556 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2d6c-account-create-update-6j78f"]
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.024095 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-n667p"]
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.054482 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.054554 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-run-httpd\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.054628 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7tzb\" (UniqueName: \"kubernetes.io/projected/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-kube-api-access-x7tzb\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.054686 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-scripts\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.054742 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-config-data\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.054769 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.054808 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-log-httpd\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.056357 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-r4v66"]
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.073619 5016 scope.go:117] "RemoveContainer" containerID="9d1b4a05f1389775128fcbbceb131df2c8eac3e95800a2adc09bd6c8934d0305"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.157445 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-scripts\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.157682 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-config-data\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.158595 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.158773 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-log-httpd\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.159396 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.159430 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-run-httpd\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.159490 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7tzb\" (UniqueName: \"kubernetes.io/projected/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-kube-api-access-x7tzb\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.159297 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-log-httpd\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.161195 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-run-httpd\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.176651 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-config-data\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.176167 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.178246 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-scripts\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.180773 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.186520 5016 scope.go:117] "RemoveContainer" containerID="d7d97a58f39c8cc6305c4b3e468721854224785535decd54547ea267ef281951"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.216873 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7tzb\" (UniqueName: \"kubernetes.io/projected/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-kube-api-access-x7tzb\") pod \"ceilometer-0\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.318485 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.320286 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-78bccb96bd-btt5f" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.320412 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-78bccb96bd-btt5f"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.767910 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-n667p" event={"ID":"2c350656-c130-4eef-8c2f-be3f74dc25f4","Type":"ContainerStarted","Data":"71a5476311bf6e02febabf4bca987e9680f8e1e367c5d796ed3419f5a92a2201"}
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.767988 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-n667p" event={"ID":"2c350656-c130-4eef-8c2f-be3f74dc25f4","Type":"ContainerStarted","Data":"6023a5639f0c91f8e6ed3735e609951474e0ea94c73ebaed9cef813bb17272af"}
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.770286 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-57f7-account-create-update-8rkld" event={"ID":"41bed616-8e34-49a0-9ade-3b17f7988491","Type":"ContainerStarted","Data":"8d13c5d7ee7ef20caeb32d1e46126494f578e054a45b38ae0db4d48c7026ebc8"}
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.770318 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-57f7-account-create-update-8rkld" event={"ID":"41bed616-8e34-49a0-9ade-3b17f7988491","Type":"ContainerStarted","Data":"4b7dca1f7a30dda188f260ff421635ea8414fe76629f4afd04139948b6dcbfa6"}
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.774869 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-edf2-account-create-update-zn58v" event={"ID":"fd14602e-defa-4080-a30b-fa7736df6746","Type":"ContainerStarted","Data":"aee1e178f02878f7ba0e4b0d452285e680f104aaf81231b2b438c0e18c26df52"}
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.774906 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-edf2-account-create-update-zn58v" event={"ID":"fd14602e-defa-4080-a30b-fa7736df6746","Type":"ContainerStarted","Data":"788be2e155995e2156c2e5dbcea635289b5b8fc4e414d51bb6f8a21071a7a67b"}
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.778640 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-r4v66" event={"ID":"ddbcb259-d1f7-4de6-b255-114890395ec8","Type":"ContainerStarted","Data":"31d7380f977ee5e2b5a6d3f1766e01dc26d619ce3526ef7b0d910224e0687a0e"}
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.779772 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2d6c-account-create-update-6j78f" event={"ID":"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75","Type":"ContainerStarted","Data":"544d729a28b95074c7262ada2fde00046f60cdaddde7d639ecd074d64c52b77a"}
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.780871 5016 generic.go:334] "Generic (PLEG): container finished" podID="c8a2de44-054b-4a31-8d4e-d88d349d59f5" containerID="3ecaa435ad620f9fac63414d2ce71452cc5013e77b2fbaab44abfaafabcf450c" exitCode=0
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.781489 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-n4qm6" event={"ID":"c8a2de44-054b-4a31-8d4e-d88d349d59f5","Type":"ContainerDied","Data":"3ecaa435ad620f9fac63414d2ce71452cc5013e77b2fbaab44abfaafabcf450c"}
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.793827 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-57f7-account-create-update-8rkld" podStartSLOduration=5.793793591 podStartE2EDuration="5.793793591s" podCreationTimestamp="2025-12-11 10:57:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:57:48.788007051 +0000 UTC m=+1385.606566650" watchObservedRunningTime="2025-12-11 10:57:48.793793591 +0000 UTC m=+1385.612353170"
Dec 11 10:57:48 crc kubenswrapper[5016]: I1211 10:57:48.813861 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-edf2-account-create-update-zn58v" podStartSLOduration=4.813845384 podStartE2EDuration="4.813845384s" podCreationTimestamp="2025-12-11 10:57:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:57:48.811303123 +0000 UTC m=+1385.629862692" watchObservedRunningTime="2025-12-11 10:57:48.813845384 +0000 UTC m=+1385.632404953"
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.001165 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.397186 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 10:57:49 crc kubenswrapper[5016]: W1211 10:57:49.429770 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f0742b8_b63b_4aa6_ac11_14e00e26a1a9.slice/crio-b4049bb30081188745d764de6235541edbf53fecf2d2cc456a77ff04339e9b87 WatchSource:0}: Error finding container b4049bb30081188745d764de6235541edbf53fecf2d2cc456a77ff04339e9b87: Status 404 returned error can't find the container with id b4049bb30081188745d764de6235541edbf53fecf2d2cc456a77ff04339e9b87
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.486307 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53bba674-9897-4df3-8898-bfba40d352f2" path="/var/lib/kubelet/pods/53bba674-9897-4df3-8898-bfba40d352f2/volumes"
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.632529 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.708692 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-public-tls-certs\") pod \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") "
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.708911 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-config-data\") pod \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") "
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.709041 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-combined-ca-bundle\") pod \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") "
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.709069 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-httpd-run\") pod \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") "
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.709131 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") "
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.709233 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-scripts\") pod \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") "
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.709292 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pcff\" (UniqueName: \"kubernetes.io/projected/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-kube-api-access-2pcff\") pod \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") "
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.709369 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-logs\") pod \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\" (UID: \"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057\") "
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.710353 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" (UID: "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.711085 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-logs" (OuterVolumeSpecName: "logs") pod "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" (UID: "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.717070 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-scripts" (OuterVolumeSpecName: "scripts") pod "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" (UID: "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.717132 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-kube-api-access-2pcff" (OuterVolumeSpecName: "kube-api-access-2pcff") pod "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" (UID: "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057"). InnerVolumeSpecName "kube-api-access-2pcff". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.721965 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" (UID: "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.751391 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" (UID: "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.776725 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" (UID: "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.788739 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-config-data" (OuterVolumeSpecName: "config-data") pod "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" (UID: "bc3294fd-9e5a-4075-8f7a-fa3c4b20c057"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.793562 5016 generic.go:334] "Generic (PLEG): container finished" podID="fd14602e-defa-4080-a30b-fa7736df6746" containerID="aee1e178f02878f7ba0e4b0d452285e680f104aaf81231b2b438c0e18c26df52" exitCode=0
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.793624 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-edf2-account-create-update-zn58v" event={"ID":"fd14602e-defa-4080-a30b-fa7736df6746","Type":"ContainerDied","Data":"aee1e178f02878f7ba0e4b0d452285e680f104aaf81231b2b438c0e18c26df52"}
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.795042 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9","Type":"ContainerStarted","Data":"b4049bb30081188745d764de6235541edbf53fecf2d2cc456a77ff04339e9b87"}
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.796433 5016 generic.go:334] "Generic (PLEG): container finished" podID="ddbcb259-d1f7-4de6-b255-114890395ec8" containerID="c18189ddbde556c4681560abf1832321cda0ce890ecfd6f8bf634c518c857395" exitCode=0
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.796474 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-r4v66" event={"ID":"ddbcb259-d1f7-4de6-b255-114890395ec8","Type":"ContainerDied","Data":"c18189ddbde556c4681560abf1832321cda0ce890ecfd6f8bf634c518c857395"}
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.799269 5016 generic.go:334] "Generic (PLEG): container finished" podID="bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" containerID="12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5" exitCode=0
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.799306 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057","Type":"ContainerDied","Data":"12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5"}
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.799323 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc3294fd-9e5a-4075-8f7a-fa3c4b20c057","Type":"ContainerDied","Data":"720b688741a3a8965149a5d19d96af94aa2b1d68421857bdb9bac472e98ae5d8"}
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.799341 5016 scope.go:117] "RemoveContainer" containerID="12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5"
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.799443 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.806979 5016 generic.go:334] "Generic (PLEG): container finished" podID="4a8b2169-5dd3-4e93-9b65-d665c3cf5e75" containerID="e6bf1a49839e29ac76eba4b99538702468547e75fa013f904a5a8f51d778a93e" exitCode=0
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.807149 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2d6c-account-create-update-6j78f" event={"ID":"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75","Type":"ContainerDied","Data":"e6bf1a49839e29ac76eba4b99538702468547e75fa013f904a5a8f51d778a93e"}
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.812461 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.812516 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.812537 5016 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-httpd-run\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.812597 5016 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" "
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.812615 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.812629 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pcff\" (UniqueName: \"kubernetes.io/projected/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-kube-api-access-2pcff\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.812647 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-logs\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.812660 5016 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057-public-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.813585 5016 generic.go:334] "Generic (PLEG): container finished" podID="2c350656-c130-4eef-8c2f-be3f74dc25f4" containerID="71a5476311bf6e02febabf4bca987e9680f8e1e367c5d796ed3419f5a92a2201" exitCode=0
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.813656 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-n667p" event={"ID":"2c350656-c130-4eef-8c2f-be3f74dc25f4","Type":"ContainerDied","Data":"71a5476311bf6e02febabf4bca987e9680f8e1e367c5d796ed3419f5a92a2201"}
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.815203 5016 generic.go:334] "Generic (PLEG): container finished" podID="41bed616-8e34-49a0-9ade-3b17f7988491" containerID="8d13c5d7ee7ef20caeb32d1e46126494f578e054a45b38ae0db4d48c7026ebc8" exitCode=0
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.815389 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-57f7-account-create-update-8rkld" event={"ID":"41bed616-8e34-49a0-9ade-3b17f7988491","Type":"ContainerDied","Data":"8d13c5d7ee7ef20caeb32d1e46126494f578e054a45b38ae0db4d48c7026ebc8"}
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.839525 5016 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc"
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.906811 5016 scope.go:117] "RemoveContainer" containerID="5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec"
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.915281 5016 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.967212 5016 scope.go:117] "RemoveContainer" containerID="12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5"
Dec 11 10:57:49 crc kubenswrapper[5016]: E1211 10:57:49.976600 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5\": container with ID starting with 12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5 not found: ID does not exist" containerID="12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5"
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.976654 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5"} err="failed to get container status \"12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5\": rpc error: code = NotFound desc = could not find container \"12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5\": container with ID starting with 12b197144a6dbe9c4044a4b34d7437a1397428a216832b0db895ca2b62f7a6b5 not found: ID does not exist"
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.976683 5016 scope.go:117] "RemoveContainer" containerID="5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec"
Dec 11 10:57:49 crc kubenswrapper[5016]: E1211 10:57:49.976968 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec\": container with ID starting with 5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec not found: ID does not exist" containerID="5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec"
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.976994 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec"} err="failed to get container status \"5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec\": rpc error: code = NotFound desc = could not find container \"5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec\": container with ID starting with 5905a38af080e30937f4523f04bf64fd9d14c66da228f16b92378e1fb36a4cec not found: ID does not exist"
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.982022 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.990206 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 11 10:57:49 crc kubenswrapper[5016]: I1211 10:57:49.999023 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 11 10:57:50 crc kubenswrapper[5016]: E1211 10:57:49.999777 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" containerName="glance-httpd"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:49.999804 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" containerName="glance-httpd"
Dec 11 10:57:50 crc kubenswrapper[5016]: E1211 10:57:49.999854 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" containerName="glance-log"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:49.999868 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" containerName="glance-log"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.000173 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" containerName="glance-httpd"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.000191 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" containerName="glance-log"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.001809 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.007880 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.008931 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.033289 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.120235 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67cb2370-3bd3-4105-9369-3b99535ed13f-logs\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.120272 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fb7gl\" (UniqueName: \"kubernetes.io/projected/67cb2370-3bd3-4105-9369-3b99535ed13f-kube-api-access-fb7gl\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.120306 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67cb2370-3bd3-4105-9369-3b99535ed13f-scripts\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.120368 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/67cb2370-3bd3-4105-9369-3b99535ed13f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.120407 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67cb2370-3bd3-4105-9369-3b99535ed13f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.120438 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.120491 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67cb2370-3bd3-4105-9369-3b99535ed13f-config-data\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.120510 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67cb2370-3bd3-4105-9369-3b99535ed13f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.183701 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-n4qm6"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.222235 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zktw\" (UniqueName: \"kubernetes.io/projected/c8a2de44-054b-4a31-8d4e-d88d349d59f5-kube-api-access-7zktw\") pod \"c8a2de44-054b-4a31-8d4e-d88d349d59f5\" (UID: \"c8a2de44-054b-4a31-8d4e-d88d349d59f5\") "
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.222302 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8a2de44-054b-4a31-8d4e-d88d349d59f5-operator-scripts\") pod \"c8a2de44-054b-4a31-8d4e-d88d349d59f5\" (UID: \"c8a2de44-054b-4a31-8d4e-d88d349d59f5\") "
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.222823 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67cb2370-3bd3-4105-9369-3b99535ed13f-logs\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.222865 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fb7gl\" (UniqueName: \"kubernetes.io/projected/67cb2370-3bd3-4105-9369-3b99535ed13f-kube-api-access-fb7gl\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.222907 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67cb2370-3bd3-4105-9369-3b99535ed13f-scripts\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.222961 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/67cb2370-3bd3-4105-9369-3b99535ed13f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.223006 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67cb2370-3bd3-4105-9369-3b99535ed13f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.223041 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.223099 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67cb2370-3bd3-4105-9369-3b99535ed13f-config-data\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.223121 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67cb2370-3bd3-4105-9369-3b99535ed13f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.249398 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67cb2370-3bd3-4105-9369-3b99535ed13f-logs\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.250110 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8a2de44-054b-4a31-8d4e-d88d349d59f5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c8a2de44-054b-4a31-8d4e-d88d349d59f5" (UID: "c8a2de44-054b-4a31-8d4e-d88d349d59f5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.250831 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/67cb2370-3bd3-4105-9369-3b99535ed13f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.251789 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.255779 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67cb2370-3bd3-4105-9369-3b99535ed13f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.261736 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67cb2370-3bd3-4105-9369-3b99535ed13f-config-data\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.262381 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67cb2370-3bd3-4105-9369-3b99535ed13f-scripts\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.269908 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8a2de44-054b-4a31-8d4e-d88d349d59f5-kube-api-access-7zktw" (OuterVolumeSpecName: "kube-api-access-7zktw") pod "c8a2de44-054b-4a31-8d4e-d88d349d59f5" (UID: "c8a2de44-054b-4a31-8d4e-d88d349d59f5"). InnerVolumeSpecName "kube-api-access-7zktw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.271254 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fb7gl\" (UniqueName: \"kubernetes.io/projected/67cb2370-3bd3-4105-9369-3b99535ed13f-kube-api-access-fb7gl\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.273567 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67cb2370-3bd3-4105-9369-3b99535ed13f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.304126 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"67cb2370-3bd3-4105-9369-3b99535ed13f\") " pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.326214 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zktw\" (UniqueName: \"kubernetes.io/projected/c8a2de44-054b-4a31-8d4e-d88d349d59f5-kube-api-access-7zktw\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.326302 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8a2de44-054b-4a31-8d4e-d88d349d59f5-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.480298 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.618459 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.632072 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-logs\") pod \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") "
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.632127 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-internal-tls-certs\") pod \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") "
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.632292 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-scripts\") pod \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") "
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.632395 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-combined-ca-bundle\") pod \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") "
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.632462 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46lmr\" (UniqueName: \"kubernetes.io/projected/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-kube-api-access-46lmr\") pod \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") "
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.632520 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") "
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.632543 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-config-data\") pod \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") "
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.632578 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-httpd-run\") pod \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\" (UID: \"8542c44c-4c37-431f-a2b1-7ff93d36f4d0\") "
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.633792 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8542c44c-4c37-431f-a2b1-7ff93d36f4d0" (UID: "8542c44c-4c37-431f-a2b1-7ff93d36f4d0"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.634190 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-logs" (OuterVolumeSpecName: "logs") pod "8542c44c-4c37-431f-a2b1-7ff93d36f4d0" (UID: "8542c44c-4c37-431f-a2b1-7ff93d36f4d0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.642610 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "8542c44c-4c37-431f-a2b1-7ff93d36f4d0" (UID: "8542c44c-4c37-431f-a2b1-7ff93d36f4d0"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.645046 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-kube-api-access-46lmr" (OuterVolumeSpecName: "kube-api-access-46lmr") pod "8542c44c-4c37-431f-a2b1-7ff93d36f4d0" (UID: "8542c44c-4c37-431f-a2b1-7ff93d36f4d0"). InnerVolumeSpecName "kube-api-access-46lmr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.656231 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-scripts" (OuterVolumeSpecName: "scripts") pod "8542c44c-4c37-431f-a2b1-7ff93d36f4d0" (UID: "8542c44c-4c37-431f-a2b1-7ff93d36f4d0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.709503 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8542c44c-4c37-431f-a2b1-7ff93d36f4d0" (UID: "8542c44c-4c37-431f-a2b1-7ff93d36f4d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.735417 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-scripts\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.735456 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.735466 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46lmr\" (UniqueName: \"kubernetes.io/projected/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-kube-api-access-46lmr\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.735491 5016 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" "
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.735503 5016 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-httpd-run\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.735512 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-logs\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.759269 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-config-data" (OuterVolumeSpecName: "config-data") pod "8542c44c-4c37-431f-a2b1-7ff93d36f4d0" (UID: "8542c44c-4c37-431f-a2b1-7ff93d36f4d0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.780147 5016 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.787637 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8542c44c-4c37-431f-a2b1-7ff93d36f4d0" (UID: "8542c44c-4c37-431f-a2b1-7ff93d36f4d0"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.837828 5016 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.837863 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.837876 5016 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8542c44c-4c37-431f-a2b1-7ff93d36f4d0-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.850031 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-n4qm6" event={"ID":"c8a2de44-054b-4a31-8d4e-d88d349d59f5","Type":"ContainerDied","Data":"b109e3c9442031b790e539909b4c40101f8d4cbe1d8473c494b4eeafecd94eae"}
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.850087 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b109e3c9442031b790e539909b4c40101f8d4cbe1d8473c494b4eeafecd94eae"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.850191 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-n4qm6"
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.863260 5016 generic.go:334] "Generic (PLEG): container finished" podID="8542c44c-4c37-431f-a2b1-7ff93d36f4d0" containerID="5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d" exitCode=0
Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.863368 5016 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.863397 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8542c44c-4c37-431f-a2b1-7ff93d36f4d0","Type":"ContainerDied","Data":"5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d"} Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.863443 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8542c44c-4c37-431f-a2b1-7ff93d36f4d0","Type":"ContainerDied","Data":"6adca5ccd64e20654e9fc2d5e5963437605ee4aa9cb93fc95fdae6391b3e70ad"} Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.863472 5016 scope.go:117] "RemoveContainer" containerID="5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d" Dec 11 10:57:50 crc kubenswrapper[5016]: I1211 10:57:50.870483 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9","Type":"ContainerStarted","Data":"fa6fcc44e38d4fa064cdb0b70e70b4952659f48865d5096004f3a0bf7f30b758"} Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.010414 5016 scope.go:117] "RemoveContainer" containerID="1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.014340 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.078178 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.092608 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:57:51 crc kubenswrapper[5016]: E1211 10:57:51.093590 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8542c44c-4c37-431f-a2b1-7ff93d36f4d0" containerName="glance-httpd" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.093613 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="8542c44c-4c37-431f-a2b1-7ff93d36f4d0" containerName="glance-httpd" Dec 11 10:57:51 crc kubenswrapper[5016]: E1211 10:57:51.093629 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8542c44c-4c37-431f-a2b1-7ff93d36f4d0" containerName="glance-log" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.093639 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="8542c44c-4c37-431f-a2b1-7ff93d36f4d0" containerName="glance-log" Dec 11 10:57:51 crc kubenswrapper[5016]: E1211 10:57:51.093664 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8a2de44-054b-4a31-8d4e-d88d349d59f5" containerName="mariadb-database-create" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.093672 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8a2de44-054b-4a31-8d4e-d88d349d59f5" containerName="mariadb-database-create" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.093911 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8a2de44-054b-4a31-8d4e-d88d349d59f5" containerName="mariadb-database-create" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.093935 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="8542c44c-4c37-431f-a2b1-7ff93d36f4d0" containerName="glance-log" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.093963 5016 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="8542c44c-4c37-431f-a2b1-7ff93d36f4d0" containerName="glance-httpd" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.102658 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.105980 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.112775 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.113106 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.126668 5016 scope.go:117] "RemoveContainer" containerID="5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d" Dec 11 10:57:51 crc kubenswrapper[5016]: E1211 10:57:51.129215 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d\": container with ID starting with 5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d not found: ID does not exist" containerID="5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.129364 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d"} err="failed to get container status \"5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d\": rpc error: code = NotFound desc = could not find container \"5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d\": container with ID starting with 5e1aeafd9e69bf68c6c242d6bea408caa7835b3482b9a19e68caf02a60d4fc8d not found: ID does not exist" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.129450 5016 scope.go:117] "RemoveContainer" containerID="1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb" Dec 11 10:57:51 crc kubenswrapper[5016]: E1211 10:57:51.140702 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb\": container with ID starting with 1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb not found: ID does not exist" containerID="1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.140751 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb"} err="failed to get container status \"1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb\": rpc error: code = NotFound desc = could not find container \"1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb\": container with ID starting with 1fc9a0a86abeef4270e2432ef9906f2ff5065c1efbf95f7099fde7f2edd834fb not found: ID does not exist" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.268887 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3243e41d-6485-4353-993a-11f309322b5f-config-data\") pod 
\"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.269563 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3243e41d-6485-4353-993a-11f309322b5f-logs\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.269597 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.269620 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3243e41d-6485-4353-993a-11f309322b5f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.269723 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3243e41d-6485-4353-993a-11f309322b5f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.271258 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxwmf\" (UniqueName: \"kubernetes.io/projected/3243e41d-6485-4353-993a-11f309322b5f-kube-api-access-zxwmf\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.271286 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3243e41d-6485-4353-993a-11f309322b5f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.271324 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3243e41d-6485-4353-993a-11f309322b5f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.316241 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.376446 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxwmf\" (UniqueName: \"kubernetes.io/projected/3243e41d-6485-4353-993a-11f309322b5f-kube-api-access-zxwmf\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc 
kubenswrapper[5016]: I1211 10:57:51.376505 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3243e41d-6485-4353-993a-11f309322b5f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.376538 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3243e41d-6485-4353-993a-11f309322b5f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.376640 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3243e41d-6485-4353-993a-11f309322b5f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.376662 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3243e41d-6485-4353-993a-11f309322b5f-logs\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.376697 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.376713 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3243e41d-6485-4353-993a-11f309322b5f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.376770 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3243e41d-6485-4353-993a-11f309322b5f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.377782 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.384173 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3243e41d-6485-4353-993a-11f309322b5f-logs\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.384259 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3243e41d-6485-4353-993a-11f309322b5f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.387558 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3243e41d-6485-4353-993a-11f309322b5f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.387701 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3243e41d-6485-4353-993a-11f309322b5f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.391422 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3243e41d-6485-4353-993a-11f309322b5f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.392003 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3243e41d-6485-4353-993a-11f309322b5f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.395865 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxwmf\" (UniqueName: \"kubernetes.io/projected/3243e41d-6485-4353-993a-11f309322b5f-kube-api-access-zxwmf\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.420404 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"3243e41d-6485-4353-993a-11f309322b5f\") " pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.494029 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8542c44c-4c37-431f-a2b1-7ff93d36f4d0" path="/var/lib/kubelet/pods/8542c44c-4c37-431f-a2b1-7ff93d36f4d0/volumes" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.495189 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc3294fd-9e5a-4075-8f7a-fa3c4b20c057" path="/var/lib/kubelet/pods/bc3294fd-9e5a-4075-8f7a-fa3c4b20c057/volumes" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.504666 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.699776 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-edf2-account-create-update-zn58v" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.749449 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-n667p" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.789546 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnpfd\" (UniqueName: \"kubernetes.io/projected/fd14602e-defa-4080-a30b-fa7736df6746-kube-api-access-xnpfd\") pod \"fd14602e-defa-4080-a30b-fa7736df6746\" (UID: \"fd14602e-defa-4080-a30b-fa7736df6746\") " Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.789645 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd14602e-defa-4080-a30b-fa7736df6746-operator-scripts\") pod \"fd14602e-defa-4080-a30b-fa7736df6746\" (UID: \"fd14602e-defa-4080-a30b-fa7736df6746\") " Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.793121 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd14602e-defa-4080-a30b-fa7736df6746-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fd14602e-defa-4080-a30b-fa7736df6746" (UID: "fd14602e-defa-4080-a30b-fa7736df6746"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.823703 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd14602e-defa-4080-a30b-fa7736df6746-kube-api-access-xnpfd" (OuterVolumeSpecName: "kube-api-access-xnpfd") pod "fd14602e-defa-4080-a30b-fa7736df6746" (UID: "fd14602e-defa-4080-a30b-fa7736df6746"). InnerVolumeSpecName "kube-api-access-xnpfd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.895236 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c350656-c130-4eef-8c2f-be3f74dc25f4-operator-scripts\") pod \"2c350656-c130-4eef-8c2f-be3f74dc25f4\" (UID: \"2c350656-c130-4eef-8c2f-be3f74dc25f4\") " Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.895490 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8jbg\" (UniqueName: \"kubernetes.io/projected/2c350656-c130-4eef-8c2f-be3f74dc25f4-kube-api-access-f8jbg\") pod \"2c350656-c130-4eef-8c2f-be3f74dc25f4\" (UID: \"2c350656-c130-4eef-8c2f-be3f74dc25f4\") " Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.895787 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c350656-c130-4eef-8c2f-be3f74dc25f4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2c350656-c130-4eef-8c2f-be3f74dc25f4" (UID: "2c350656-c130-4eef-8c2f-be3f74dc25f4"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.896589 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c350656-c130-4eef-8c2f-be3f74dc25f4-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.896614 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnpfd\" (UniqueName: \"kubernetes.io/projected/fd14602e-defa-4080-a30b-fa7736df6746-kube-api-access-xnpfd\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.896629 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd14602e-defa-4080-a30b-fa7736df6746-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.922313 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c350656-c130-4eef-8c2f-be3f74dc25f4-kube-api-access-f8jbg" (OuterVolumeSpecName: "kube-api-access-f8jbg") pod "2c350656-c130-4eef-8c2f-be3f74dc25f4" (UID: "2c350656-c130-4eef-8c2f-be3f74dc25f4"). InnerVolumeSpecName "kube-api-access-f8jbg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.939200 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-57f7-account-create-update-8rkld" event={"ID":"41bed616-8e34-49a0-9ade-3b17f7988491","Type":"ContainerDied","Data":"4b7dca1f7a30dda188f260ff421635ea8414fe76629f4afd04139948b6dcbfa6"} Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.939250 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b7dca1f7a30dda188f260ff421635ea8414fe76629f4afd04139948b6dcbfa6" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.971164 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-edf2-account-create-update-zn58v" event={"ID":"fd14602e-defa-4080-a30b-fa7736df6746","Type":"ContainerDied","Data":"788be2e155995e2156c2e5dbcea635289b5b8fc4e414d51bb6f8a21071a7a67b"} Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.971210 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-edf2-account-create-update-zn58v" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.971216 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="788be2e155995e2156c2e5dbcea635289b5b8fc4e414d51bb6f8a21071a7a67b" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.979344 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-r4v66" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.984609 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2d6c-account-create-update-6j78f" Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.989178 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"67cb2370-3bd3-4105-9369-3b99535ed13f","Type":"ContainerStarted","Data":"808ba189a37f735e256946487a9fbcfaf36d627db19ecfe3e8b2297da7569816"} Dec 11 10:57:51 crc kubenswrapper[5016]: I1211 10:57:51.992908 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-57f7-account-create-update-8rkld" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:51.999979 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8jbg\" (UniqueName: \"kubernetes.io/projected/2c350656-c130-4eef-8c2f-be3f74dc25f4-kube-api-access-f8jbg\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.004688 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9","Type":"ContainerStarted","Data":"1c51cd327b9270d5555e9ab79ae4033d91fbef464e28cbede85505027eb6d411"} Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.007485 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-r4v66" event={"ID":"ddbcb259-d1f7-4de6-b255-114890395ec8","Type":"ContainerDied","Data":"31d7380f977ee5e2b5a6d3f1766e01dc26d619ce3526ef7b0d910224e0687a0e"} Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.007512 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31d7380f977ee5e2b5a6d3f1766e01dc26d619ce3526ef7b0d910224e0687a0e" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.007605 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-r4v66" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.017341 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2d6c-account-create-update-6j78f" event={"ID":"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75","Type":"ContainerDied","Data":"544d729a28b95074c7262ada2fde00046f60cdaddde7d639ecd074d64c52b77a"} Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.017375 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="544d729a28b95074c7262ada2fde00046f60cdaddde7d639ecd074d64c52b77a" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.017433 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2d6c-account-create-update-6j78f" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.032635 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-n667p" event={"ID":"2c350656-c130-4eef-8c2f-be3f74dc25f4","Type":"ContainerDied","Data":"6023a5639f0c91f8e6ed3735e609951474e0ea94c73ebaed9cef813bb17272af"} Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.032981 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6023a5639f0c91f8e6ed3735e609951474e0ea94c73ebaed9cef813bb17272af" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.033104 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-n667p" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.105197 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5xmk\" (UniqueName: \"kubernetes.io/projected/ddbcb259-d1f7-4de6-b255-114890395ec8-kube-api-access-m5xmk\") pod \"ddbcb259-d1f7-4de6-b255-114890395ec8\" (UID: \"ddbcb259-d1f7-4de6-b255-114890395ec8\") " Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.105289 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xm6jk\" (UniqueName: \"kubernetes.io/projected/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75-kube-api-access-xm6jk\") pod \"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75\" (UID: \"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75\") " Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.105392 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kr7d4\" (UniqueName: \"kubernetes.io/projected/41bed616-8e34-49a0-9ade-3b17f7988491-kube-api-access-kr7d4\") pod \"41bed616-8e34-49a0-9ade-3b17f7988491\" (UID: \"41bed616-8e34-49a0-9ade-3b17f7988491\") " Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.105485 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41bed616-8e34-49a0-9ade-3b17f7988491-operator-scripts\") pod \"41bed616-8e34-49a0-9ade-3b17f7988491\" (UID: \"41bed616-8e34-49a0-9ade-3b17f7988491\") " Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.105557 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75-operator-scripts\") pod \"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75\" (UID: \"4a8b2169-5dd3-4e93-9b65-d665c3cf5e75\") " Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.105652 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddbcb259-d1f7-4de6-b255-114890395ec8-operator-scripts\") pod \"ddbcb259-d1f7-4de6-b255-114890395ec8\" (UID: \"ddbcb259-d1f7-4de6-b255-114890395ec8\") " Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.112677 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ddbcb259-d1f7-4de6-b255-114890395ec8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ddbcb259-d1f7-4de6-b255-114890395ec8" (UID: "ddbcb259-d1f7-4de6-b255-114890395ec8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.113237 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41bed616-8e34-49a0-9ade-3b17f7988491-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "41bed616-8e34-49a0-9ade-3b17f7988491" (UID: "41bed616-8e34-49a0-9ade-3b17f7988491"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.113696 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4a8b2169-5dd3-4e93-9b65-d665c3cf5e75" (UID: "4a8b2169-5dd3-4e93-9b65-d665c3cf5e75"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.116859 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41bed616-8e34-49a0-9ade-3b17f7988491-kube-api-access-kr7d4" (OuterVolumeSpecName: "kube-api-access-kr7d4") pod "41bed616-8e34-49a0-9ade-3b17f7988491" (UID: "41bed616-8e34-49a0-9ade-3b17f7988491"). InnerVolumeSpecName "kube-api-access-kr7d4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.135017 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75-kube-api-access-xm6jk" (OuterVolumeSpecName: "kube-api-access-xm6jk") pod "4a8b2169-5dd3-4e93-9b65-d665c3cf5e75" (UID: "4a8b2169-5dd3-4e93-9b65-d665c3cf5e75"). InnerVolumeSpecName "kube-api-access-xm6jk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.171511 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddbcb259-d1f7-4de6-b255-114890395ec8-kube-api-access-m5xmk" (OuterVolumeSpecName: "kube-api-access-m5xmk") pod "ddbcb259-d1f7-4de6-b255-114890395ec8" (UID: "ddbcb259-d1f7-4de6-b255-114890395ec8"). InnerVolumeSpecName "kube-api-access-m5xmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.214888 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.215054 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ddbcb259-d1f7-4de6-b255-114890395ec8-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.215070 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5xmk\" (UniqueName: \"kubernetes.io/projected/ddbcb259-d1f7-4de6-b255-114890395ec8-kube-api-access-m5xmk\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.215106 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xm6jk\" (UniqueName: \"kubernetes.io/projected/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75-kube-api-access-xm6jk\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.215117 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kr7d4\" (UniqueName: \"kubernetes.io/projected/41bed616-8e34-49a0-9ade-3b17f7988491-kube-api-access-kr7d4\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.215126 5016 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41bed616-8e34-49a0-9ade-3b17f7988491-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:52 crc kubenswrapper[5016]: I1211 10:57:52.365239 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 11 10:57:52 crc kubenswrapper[5016]: W1211 10:57:52.378459 5016 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3243e41d_6485_4353_993a_11f309322b5f.slice/crio-91680f7fc5086cbae093fbd792631e9f4226a08060f136cd5b37038695b3e8a2 WatchSource:0}: Error finding container 91680f7fc5086cbae093fbd792631e9f4226a08060f136cd5b37038695b3e8a2: Status 404 returned error can't find the container with id 91680f7fc5086cbae093fbd792631e9f4226a08060f136cd5b37038695b3e8a2 Dec 11 10:57:53 crc kubenswrapper[5016]: I1211 10:57:53.069065 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3243e41d-6485-4353-993a-11f309322b5f","Type":"ContainerStarted","Data":"91680f7fc5086cbae093fbd792631e9f4226a08060f136cd5b37038695b3e8a2"} Dec 11 10:57:53 crc kubenswrapper[5016]: I1211 10:57:53.075214 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"67cb2370-3bd3-4105-9369-3b99535ed13f","Type":"ContainerStarted","Data":"e26f804dd8afa9c6771f5e7117a0ffe2624d45e2bff32ac48eb1d16f7edf5d70"} Dec 11 10:57:53 crc kubenswrapper[5016]: I1211 10:57:53.088548 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-57f7-account-create-update-8rkld" Dec 11 10:57:53 crc kubenswrapper[5016]: I1211 10:57:53.089104 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9","Type":"ContainerStarted","Data":"3d279e54b5a9f65a6a500276a2ad2ae240b2be4f3f359a86452815bbe78d905c"} Dec 11 10:57:53 crc kubenswrapper[5016]: I1211 10:57:53.964644 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.079646 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlhkq\" (UniqueName: \"kubernetes.io/projected/6f611e53-2b48-4371-8673-dd02e7533a7d-kube-api-access-jlhkq\") pod \"6f611e53-2b48-4371-8673-dd02e7533a7d\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.079707 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f611e53-2b48-4371-8673-dd02e7533a7d-logs\") pod \"6f611e53-2b48-4371-8673-dd02e7533a7d\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.079753 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-horizon-secret-key\") pod \"6f611e53-2b48-4371-8673-dd02e7533a7d\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.079778 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6f611e53-2b48-4371-8673-dd02e7533a7d-config-data\") pod \"6f611e53-2b48-4371-8673-dd02e7533a7d\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.079813 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-combined-ca-bundle\") pod \"6f611e53-2b48-4371-8673-dd02e7533a7d\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 
10:57:54.079902 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f611e53-2b48-4371-8673-dd02e7533a7d-scripts\") pod \"6f611e53-2b48-4371-8673-dd02e7533a7d\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.079925 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-horizon-tls-certs\") pod \"6f611e53-2b48-4371-8673-dd02e7533a7d\" (UID: \"6f611e53-2b48-4371-8673-dd02e7533a7d\") " Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.080526 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f611e53-2b48-4371-8673-dd02e7533a7d-logs" (OuterVolumeSpecName: "logs") pod "6f611e53-2b48-4371-8673-dd02e7533a7d" (UID: "6f611e53-2b48-4371-8673-dd02e7533a7d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.090683 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f611e53-2b48-4371-8673-dd02e7533a7d-kube-api-access-jlhkq" (OuterVolumeSpecName: "kube-api-access-jlhkq") pod "6f611e53-2b48-4371-8673-dd02e7533a7d" (UID: "6f611e53-2b48-4371-8673-dd02e7533a7d"). InnerVolumeSpecName "kube-api-access-jlhkq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.137275 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "6f611e53-2b48-4371-8673-dd02e7533a7d" (UID: "6f611e53-2b48-4371-8673-dd02e7533a7d"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.138982 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f611e53-2b48-4371-8673-dd02e7533a7d-scripts" (OuterVolumeSpecName: "scripts") pod "6f611e53-2b48-4371-8673-dd02e7533a7d" (UID: "6f611e53-2b48-4371-8673-dd02e7533a7d"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.142366 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3243e41d-6485-4353-993a-11f309322b5f","Type":"ContainerStarted","Data":"59b30fa8f6f894bbe8b6c6d70ad9b19611b9bc01a75267be7a37274dd289fe82"} Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.142431 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3243e41d-6485-4353-993a-11f309322b5f","Type":"ContainerStarted","Data":"aadb1143094bdd233b51e1b1129342d1cd3608225743b90224970065f1c7b6d2"} Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.145219 5016 generic.go:334] "Generic (PLEG): container finished" podID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerID="abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900" exitCode=137 Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.145281 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78bccb96bd-btt5f" event={"ID":"6f611e53-2b48-4371-8673-dd02e7533a7d","Type":"ContainerDied","Data":"abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900"} Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.145306 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78bccb96bd-btt5f" event={"ID":"6f611e53-2b48-4371-8673-dd02e7533a7d","Type":"ContainerDied","Data":"f69afe42f958ebec2fad40234594d228395f2d8f455c349eefce2aa0c072282f"} Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.145330 5016 scope.go:117] "RemoveContainer" containerID="ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.145456 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78bccb96bd-btt5f" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.168525 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"67cb2370-3bd3-4105-9369-3b99535ed13f","Type":"ContainerStarted","Data":"ce5c68b94d0fb0d71bf5cd9f6b08703eefae049089079efeb15341e46c39b1e6"} Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.179061 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6f611e53-2b48-4371-8673-dd02e7533a7d" (UID: "6f611e53-2b48-4371-8673-dd02e7533a7d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.181854 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlhkq\" (UniqueName: \"kubernetes.io/projected/6f611e53-2b48-4371-8673-dd02e7533a7d-kube-api-access-jlhkq\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.181886 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f611e53-2b48-4371-8673-dd02e7533a7d-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.181899 5016 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.181909 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.181921 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f611e53-2b48-4371-8673-dd02e7533a7d-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.194790 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.194759642 podStartE2EDuration="4.194759642s" podCreationTimestamp="2025-12-11 10:57:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:57:54.173036378 +0000 UTC m=+1390.991595977" watchObservedRunningTime="2025-12-11 10:57:54.194759642 +0000 UTC m=+1391.013319241" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.219402 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.2193794350000005 podStartE2EDuration="5.219379435s" podCreationTimestamp="2025-12-11 10:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:57:54.202065408 +0000 UTC m=+1391.020624997" watchObservedRunningTime="2025-12-11 10:57:54.219379435 +0000 UTC m=+1391.037939014" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.232575 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f611e53-2b48-4371-8673-dd02e7533a7d-config-data" (OuterVolumeSpecName: "config-data") pod "6f611e53-2b48-4371-8673-dd02e7533a7d" (UID: "6f611e53-2b48-4371-8673-dd02e7533a7d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.253971 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "6f611e53-2b48-4371-8673-dd02e7533a7d" (UID: "6f611e53-2b48-4371-8673-dd02e7533a7d"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.284851 5016 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f611e53-2b48-4371-8673-dd02e7533a7d-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.284901 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6f611e53-2b48-4371-8673-dd02e7533a7d-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.369762 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-w6xjr"] Dec 11 10:57:54 crc kubenswrapper[5016]: E1211 10:57:54.370272 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a8b2169-5dd3-4e93-9b65-d665c3cf5e75" containerName="mariadb-account-create-update" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370293 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a8b2169-5dd3-4e93-9b65-d665c3cf5e75" containerName="mariadb-account-create-update" Dec 11 10:57:54 crc kubenswrapper[5016]: E1211 10:57:54.370309 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd14602e-defa-4080-a30b-fa7736df6746" containerName="mariadb-account-create-update" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370319 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd14602e-defa-4080-a30b-fa7736df6746" containerName="mariadb-account-create-update" Dec 11 10:57:54 crc kubenswrapper[5016]: E1211 10:57:54.370334 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41bed616-8e34-49a0-9ade-3b17f7988491" containerName="mariadb-account-create-update" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370340 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="41bed616-8e34-49a0-9ade-3b17f7988491" containerName="mariadb-account-create-update" Dec 11 10:57:54 crc kubenswrapper[5016]: E1211 10:57:54.370358 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c350656-c130-4eef-8c2f-be3f74dc25f4" containerName="mariadb-database-create" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370365 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c350656-c130-4eef-8c2f-be3f74dc25f4" containerName="mariadb-database-create" Dec 11 10:57:54 crc kubenswrapper[5016]: E1211 10:57:54.370380 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon-log" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370386 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon-log" Dec 11 10:57:54 crc kubenswrapper[5016]: E1211 10:57:54.370398 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddbcb259-d1f7-4de6-b255-114890395ec8" containerName="mariadb-database-create" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370404 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddbcb259-d1f7-4de6-b255-114890395ec8" containerName="mariadb-database-create" Dec 11 10:57:54 crc kubenswrapper[5016]: E1211 10:57:54.370424 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370430 5016 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370592 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="41bed616-8e34-49a0-9ade-3b17f7988491" containerName="mariadb-account-create-update" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370601 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c350656-c130-4eef-8c2f-be3f74dc25f4" containerName="mariadb-database-create" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370613 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddbcb259-d1f7-4de6-b255-114890395ec8" containerName="mariadb-database-create" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370620 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon-log" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370627 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd14602e-defa-4080-a30b-fa7736df6746" containerName="mariadb-account-create-update" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370648 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a8b2169-5dd3-4e93-9b65-d665c3cf5e75" containerName="mariadb-account-create-update" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.370661 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" containerName="horizon" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.371307 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.374964 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-lvnwd" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.375893 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.380656 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.387240 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-w6xjr"] Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.391025 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-scripts\") pod \"nova-cell0-conductor-db-sync-w6xjr\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.391237 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-config-data\") pod \"nova-cell0-conductor-db-sync-w6xjr\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.391345 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w65m8\" (UniqueName: \"kubernetes.io/projected/7e3e1081-5b46-471a-8978-804c54a32bc9-kube-api-access-w65m8\") pod 
\"nova-cell0-conductor-db-sync-w6xjr\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.391548 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-w6xjr\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.451307 5016 scope.go:117] "RemoveContainer" containerID="abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.492829 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-78bccb96bd-btt5f"] Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.493254 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-config-data\") pod \"nova-cell0-conductor-db-sync-w6xjr\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.493356 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w65m8\" (UniqueName: \"kubernetes.io/projected/7e3e1081-5b46-471a-8978-804c54a32bc9-kube-api-access-w65m8\") pod \"nova-cell0-conductor-db-sync-w6xjr\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.493485 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-w6xjr\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.493646 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-scripts\") pod \"nova-cell0-conductor-db-sync-w6xjr\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.498525 5016 scope.go:117] "RemoveContainer" containerID="ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887" Dec 11 10:57:54 crc kubenswrapper[5016]: E1211 10:57:54.502170 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887\": container with ID starting with ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887 not found: ID does not exist" containerID="ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.502239 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887"} err="failed to get container status \"ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887\": rpc error: code = NotFound desc = could not find container 
\"ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887\": container with ID starting with ec2f4ab69c13e546afd91230b9fde8c5597c45d5fafb17a9743185ba130c5887 not found: ID does not exist" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.502277 5016 scope.go:117] "RemoveContainer" containerID="abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.504476 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-scripts\") pod \"nova-cell0-conductor-db-sync-w6xjr\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: E1211 10:57:54.508181 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900\": container with ID starting with abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900 not found: ID does not exist" containerID="abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.508250 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900"} err="failed to get container status \"abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900\": rpc error: code = NotFound desc = could not find container \"abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900\": container with ID starting with abba6d92ad1dfe0ad6d86578b010aecb0bcf1e665946b3d5d24a8b891c431900 not found: ID does not exist" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.508284 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-w6xjr\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.513733 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-config-data\") pod \"nova-cell0-conductor-db-sync-w6xjr\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.519550 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w65m8\" (UniqueName: \"kubernetes.io/projected/7e3e1081-5b46-471a-8978-804c54a32bc9-kube-api-access-w65m8\") pod \"nova-cell0-conductor-db-sync-w6xjr\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.522228 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-78bccb96bd-btt5f"] Dec 11 10:57:54 crc kubenswrapper[5016]: I1211 10:57:54.701359 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:57:55 crc kubenswrapper[5016]: I1211 10:57:55.305444 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-w6xjr"] Dec 11 10:57:55 crc kubenswrapper[5016]: W1211 10:57:55.309811 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e3e1081_5b46_471a_8978_804c54a32bc9.slice/crio-0ae128b9b061ecde234dcfdabb000d0145ce9e5e6e138bc7f3da9b6daad2e15e WatchSource:0}: Error finding container 0ae128b9b061ecde234dcfdabb000d0145ce9e5e6e138bc7f3da9b6daad2e15e: Status 404 returned error can't find the container with id 0ae128b9b061ecde234dcfdabb000d0145ce9e5e6e138bc7f3da9b6daad2e15e Dec 11 10:57:55 crc kubenswrapper[5016]: I1211 10:57:55.485378 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f611e53-2b48-4371-8673-dd02e7533a7d" path="/var/lib/kubelet/pods/6f611e53-2b48-4371-8673-dd02e7533a7d/volumes" Dec 11 10:57:56 crc kubenswrapper[5016]: I1211 10:57:56.230068 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9","Type":"ContainerStarted","Data":"0faf93a6abcb389c52ed97edd6a3378e1d825253cd644fb6c437696fe3867b67"} Dec 11 10:57:56 crc kubenswrapper[5016]: I1211 10:57:56.230786 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="ceilometer-central-agent" containerID="cri-o://fa6fcc44e38d4fa064cdb0b70e70b4952659f48865d5096004f3a0bf7f30b758" gracePeriod=30 Dec 11 10:57:56 crc kubenswrapper[5016]: I1211 10:57:56.231200 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 10:57:56 crc kubenswrapper[5016]: I1211 10:57:56.231478 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="proxy-httpd" containerID="cri-o://0faf93a6abcb389c52ed97edd6a3378e1d825253cd644fb6c437696fe3867b67" gracePeriod=30 Dec 11 10:57:56 crc kubenswrapper[5016]: I1211 10:57:56.231647 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="sg-core" containerID="cri-o://3d279e54b5a9f65a6a500276a2ad2ae240b2be4f3f359a86452815bbe78d905c" gracePeriod=30 Dec 11 10:57:56 crc kubenswrapper[5016]: I1211 10:57:56.231666 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="ceilometer-notification-agent" containerID="cri-o://1c51cd327b9270d5555e9ab79ae4033d91fbef464e28cbede85505027eb6d411" gracePeriod=30 Dec 11 10:57:56 crc kubenswrapper[5016]: I1211 10:57:56.232875 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-w6xjr" event={"ID":"7e3e1081-5b46-471a-8978-804c54a32bc9","Type":"ContainerStarted","Data":"0ae128b9b061ecde234dcfdabb000d0145ce9e5e6e138bc7f3da9b6daad2e15e"} Dec 11 10:57:56 crc kubenswrapper[5016]: I1211 10:57:56.268346 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.652969925 podStartE2EDuration="9.268311825s" podCreationTimestamp="2025-12-11 10:57:47 +0000 UTC" firstStartedPulling="2025-12-11 10:57:49.434398049 
+0000 UTC m=+1386.252957638" lastFinishedPulling="2025-12-11 10:57:55.049739959 +0000 UTC m=+1391.868299538" observedRunningTime="2025-12-11 10:57:56.256280674 +0000 UTC m=+1393.074840253" watchObservedRunningTime="2025-12-11 10:57:56.268311825 +0000 UTC m=+1393.086871424" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.254403 5016 generic.go:334] "Generic (PLEG): container finished" podID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerID="0faf93a6abcb389c52ed97edd6a3378e1d825253cd644fb6c437696fe3867b67" exitCode=0 Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.254980 5016 generic.go:334] "Generic (PLEG): container finished" podID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerID="3d279e54b5a9f65a6a500276a2ad2ae240b2be4f3f359a86452815bbe78d905c" exitCode=2 Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.254990 5016 generic.go:334] "Generic (PLEG): container finished" podID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerID="1c51cd327b9270d5555e9ab79ae4033d91fbef464e28cbede85505027eb6d411" exitCode=0 Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.255000 5016 generic.go:334] "Generic (PLEG): container finished" podID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerID="fa6fcc44e38d4fa064cdb0b70e70b4952659f48865d5096004f3a0bf7f30b758" exitCode=0 Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.255023 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9","Type":"ContainerDied","Data":"0faf93a6abcb389c52ed97edd6a3378e1d825253cd644fb6c437696fe3867b67"} Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.255049 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9","Type":"ContainerDied","Data":"3d279e54b5a9f65a6a500276a2ad2ae240b2be4f3f359a86452815bbe78d905c"} Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.255060 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9","Type":"ContainerDied","Data":"1c51cd327b9270d5555e9ab79ae4033d91fbef464e28cbede85505027eb6d411"} Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.255070 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9","Type":"ContainerDied","Data":"fa6fcc44e38d4fa064cdb0b70e70b4952659f48865d5096004f3a0bf7f30b758"} Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.429559 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.572277 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-combined-ca-bundle\") pod \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.572818 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-scripts\") pod \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.572932 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-log-httpd\") pod \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.573046 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7tzb\" (UniqueName: \"kubernetes.io/projected/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-kube-api-access-x7tzb\") pod \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.573229 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-config-data\") pod \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.573383 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-sg-core-conf-yaml\") pod \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.573482 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-run-httpd\") pod \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.573772 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" (UID: "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.574310 5016 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.574909 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" (UID: "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.580350 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-kube-api-access-x7tzb" (OuterVolumeSpecName: "kube-api-access-x7tzb") pod "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" (UID: "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9"). InnerVolumeSpecName "kube-api-access-x7tzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.592426 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-scripts" (OuterVolumeSpecName: "scripts") pod "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" (UID: "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.613722 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" (UID: "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.676328 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" (UID: "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.677205 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-combined-ca-bundle\") pod \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\" (UID: \"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9\") " Dec 11 10:57:57 crc kubenswrapper[5016]: W1211 10:57:57.677328 5016 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9/volumes/kubernetes.io~secret/combined-ca-bundle Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.677352 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" (UID: "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.677963 5016 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.677991 5016 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.678000 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.678013 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.678024 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7tzb\" (UniqueName: \"kubernetes.io/projected/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-kube-api-access-x7tzb\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.707112 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-config-data" (OuterVolumeSpecName: "config-data") pod "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" (UID: "2f0742b8-b63b-4aa6-ac11-14e00e26a1a9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:57:57 crc kubenswrapper[5016]: I1211 10:57:57.779500 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.267564 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2f0742b8-b63b-4aa6-ac11-14e00e26a1a9","Type":"ContainerDied","Data":"b4049bb30081188745d764de6235541edbf53fecf2d2cc456a77ff04339e9b87"} Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.267611 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.267653 5016 scope.go:117] "RemoveContainer" containerID="0faf93a6abcb389c52ed97edd6a3378e1d825253cd644fb6c437696fe3867b67" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.313730 5016 scope.go:117] "RemoveContainer" containerID="3d279e54b5a9f65a6a500276a2ad2ae240b2be4f3f359a86452815bbe78d905c" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.314402 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.336821 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.348268 5016 scope.go:117] "RemoveContainer" containerID="1c51cd327b9270d5555e9ab79ae4033d91fbef464e28cbede85505027eb6d411" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.351157 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:57:58 crc kubenswrapper[5016]: E1211 10:57:58.351623 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="proxy-httpd" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.351646 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="proxy-httpd" Dec 11 10:57:58 crc kubenswrapper[5016]: E1211 10:57:58.351667 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="ceilometer-central-agent" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.351676 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="ceilometer-central-agent" Dec 11 10:57:58 crc kubenswrapper[5016]: E1211 10:57:58.351699 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="sg-core" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.351709 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="sg-core" Dec 11 10:57:58 crc kubenswrapper[5016]: E1211 10:57:58.351747 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="ceilometer-notification-agent" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.351755 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="ceilometer-notification-agent" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.352001 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="sg-core" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.352029 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="proxy-httpd" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.352043 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="ceilometer-central-agent" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.352061 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" containerName="ceilometer-notification-agent" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.357280 5016 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.358863 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.360492 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.361635 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.395259 5016 scope.go:117] "RemoveContainer" containerID="fa6fcc44e38d4fa064cdb0b70e70b4952659f48865d5096004f3a0bf7f30b758" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.500077 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xs76\" (UniqueName: \"kubernetes.io/projected/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-kube-api-access-2xs76\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.500212 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-scripts\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.500271 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-log-httpd\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.500334 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.500366 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-run-httpd\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.500390 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.500446 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-config-data\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.603083 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-log-httpd\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.603151 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.603204 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-run-httpd\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.603234 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.603318 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-config-data\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.603371 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xs76\" (UniqueName: \"kubernetes.io/projected/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-kube-api-access-2xs76\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.603449 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-scripts\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.603580 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-log-httpd\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.605656 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-run-httpd\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.611773 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.612510 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.613597 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-config-data\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.614906 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-scripts\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.623721 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xs76\" (UniqueName: \"kubernetes.io/projected/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-kube-api-access-2xs76\") pod \"ceilometer-0\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.678337 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:57:58 crc kubenswrapper[5016]: I1211 10:57:58.852641 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:57:59 crc kubenswrapper[5016]: I1211 10:57:59.207178 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:57:59 crc kubenswrapper[5016]: W1211 10:57:59.216086 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod75f2f60e_e725_4ed1_a4f5_2814e7afc3ff.slice/crio-8b544d1fe85f04b3d46a34c7ef41d880a2d5a8242444b295bf2ad67504bb5f1e WatchSource:0}: Error finding container 8b544d1fe85f04b3d46a34c7ef41d880a2d5a8242444b295bf2ad67504bb5f1e: Status 404 returned error can't find the container with id 8b544d1fe85f04b3d46a34c7ef41d880a2d5a8242444b295bf2ad67504bb5f1e Dec 11 10:57:59 crc kubenswrapper[5016]: I1211 10:57:59.282811 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff","Type":"ContainerStarted","Data":"8b544d1fe85f04b3d46a34c7ef41d880a2d5a8242444b295bf2ad67504bb5f1e"} Dec 11 10:57:59 crc kubenswrapper[5016]: I1211 10:57:59.490384 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f0742b8-b63b-4aa6-ac11-14e00e26a1a9" path="/var/lib/kubelet/pods/2f0742b8-b63b-4aa6-ac11-14e00e26a1a9/volumes" Dec 11 10:58:00 crc kubenswrapper[5016]: I1211 10:58:00.481585 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 11 10:58:00 crc kubenswrapper[5016]: I1211 10:58:00.482002 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 11 10:58:00 crc kubenswrapper[5016]: I1211 10:58:00.527883 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 11 10:58:00 crc kubenswrapper[5016]: I1211 10:58:00.542022 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 11 10:58:01 crc kubenswrapper[5016]: I1211 
10:58:01.061297 5016 scope.go:117] "RemoveContainer" containerID="6d15fa7748010dc5129f7895fb5fcd4a56140f56188815fba955881660495547" Dec 11 10:58:01 crc kubenswrapper[5016]: I1211 10:58:01.311169 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 11 10:58:01 crc kubenswrapper[5016]: I1211 10:58:01.311655 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 11 10:58:01 crc kubenswrapper[5016]: I1211 10:58:01.514388 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 11 10:58:01 crc kubenswrapper[5016]: I1211 10:58:01.514449 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 11 10:58:01 crc kubenswrapper[5016]: I1211 10:58:01.545795 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 11 10:58:01 crc kubenswrapper[5016]: I1211 10:58:01.567391 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 11 10:58:02 crc kubenswrapper[5016]: I1211 10:58:02.323894 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 11 10:58:02 crc kubenswrapper[5016]: I1211 10:58:02.323959 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 11 10:58:03 crc kubenswrapper[5016]: I1211 10:58:03.333397 5016 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 10:58:03 crc kubenswrapper[5016]: I1211 10:58:03.333737 5016 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 10:58:03 crc kubenswrapper[5016]: I1211 10:58:03.653379 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 11 10:58:03 crc kubenswrapper[5016]: I1211 10:58:03.660127 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 11 10:58:04 crc kubenswrapper[5016]: I1211 10:58:04.697092 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 11 10:58:04 crc kubenswrapper[5016]: I1211 10:58:04.697504 5016 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 11 10:58:04 crc kubenswrapper[5016]: I1211 10:58:04.704790 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 11 10:58:05 crc kubenswrapper[5016]: I1211 10:58:05.430299 5016 scope.go:117] "RemoveContainer" containerID="87b02d484d346f8d27e93aee5e99442085913516127d718a4db5db75b1822a9b" Dec 11 10:58:05 crc kubenswrapper[5016]: I1211 10:58:05.480800 5016 scope.go:117] "RemoveContainer" containerID="bf3b6be1d028ad5de3d8cacf269c742af8a98d81834d5861d12af6c619f617e0" Dec 11 10:58:06 crc kubenswrapper[5016]: I1211 10:58:06.378302 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-w6xjr" event={"ID":"7e3e1081-5b46-471a-8978-804c54a32bc9","Type":"ContainerStarted","Data":"46bfd12debb885d0c6820a06af23ca97f3b687bc9a3be01df22bec6887d980ae"} Dec 11 10:58:06 crc kubenswrapper[5016]: I1211 10:58:06.382092 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff","Type":"ContainerStarted","Data":"52be5c6eadf4a7056532f8ed33f170f0a03357349c94288bc9ce8ce8d47ca89d"} Dec 11 10:58:08 crc kubenswrapper[5016]: I1211 10:58:08.412557 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff","Type":"ContainerStarted","Data":"cdbdaa99885fa4c164c5e11bd0811f5a87204d2e1341dcbb70d5dcebdd1f4366"} Dec 11 10:58:10 crc kubenswrapper[5016]: I1211 10:58:10.434753 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff","Type":"ContainerStarted","Data":"d979e12b0d2ab299631c5539a33791adfbf19db79336dd57642e1e3f684c3fd0"} Dec 11 10:58:11 crc kubenswrapper[5016]: I1211 10:58:11.445810 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff","Type":"ContainerStarted","Data":"024c16cccf7b34e632bcd625b6ce1829d7462ded8e38b4f2fd1606e27f00a92f"} Dec 11 10:58:11 crc kubenswrapper[5016]: I1211 10:58:11.446087 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="ceilometer-central-agent" containerID="cri-o://52be5c6eadf4a7056532f8ed33f170f0a03357349c94288bc9ce8ce8d47ca89d" gracePeriod=30 Dec 11 10:58:11 crc kubenswrapper[5016]: I1211 10:58:11.446305 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 10:58:11 crc kubenswrapper[5016]: I1211 10:58:11.446334 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="proxy-httpd" containerID="cri-o://024c16cccf7b34e632bcd625b6ce1829d7462ded8e38b4f2fd1606e27f00a92f" gracePeriod=30 Dec 11 10:58:11 crc kubenswrapper[5016]: I1211 10:58:11.446389 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="sg-core" containerID="cri-o://d979e12b0d2ab299631c5539a33791adfbf19db79336dd57642e1e3f684c3fd0" gracePeriod=30 Dec 11 10:58:11 crc kubenswrapper[5016]: I1211 10:58:11.446425 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="ceilometer-notification-agent" containerID="cri-o://cdbdaa99885fa4c164c5e11bd0811f5a87204d2e1341dcbb70d5dcebdd1f4366" gracePeriod=30 Dec 11 10:58:11 crc kubenswrapper[5016]: I1211 10:58:11.483381 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-w6xjr" podStartSLOduration=7.285405739 podStartE2EDuration="17.483355616s" podCreationTimestamp="2025-12-11 10:57:54 +0000 UTC" firstStartedPulling="2025-12-11 10:57:55.316347339 +0000 UTC m=+1392.134906918" lastFinishedPulling="2025-12-11 10:58:05.514297206 +0000 UTC m=+1402.332856795" observedRunningTime="2025-12-11 10:58:06.403910889 +0000 UTC m=+1403.222470488" watchObservedRunningTime="2025-12-11 10:58:11.483355616 +0000 UTC m=+1408.301915205" Dec 11 10:58:11 crc kubenswrapper[5016]: I1211 10:58:11.484886 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.259589262 podStartE2EDuration="13.484878583s" podCreationTimestamp="2025-12-11 10:57:58 +0000 
UTC" firstStartedPulling="2025-12-11 10:57:59.221618352 +0000 UTC m=+1396.040177941" lastFinishedPulling="2025-12-11 10:58:10.446907683 +0000 UTC m=+1407.265467262" observedRunningTime="2025-12-11 10:58:11.466226214 +0000 UTC m=+1408.284785803" watchObservedRunningTime="2025-12-11 10:58:11.484878583 +0000 UTC m=+1408.303438162" Dec 11 10:58:12 crc kubenswrapper[5016]: I1211 10:58:12.461611 5016 generic.go:334] "Generic (PLEG): container finished" podID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerID="024c16cccf7b34e632bcd625b6ce1829d7462ded8e38b4f2fd1606e27f00a92f" exitCode=0 Dec 11 10:58:12 crc kubenswrapper[5016]: I1211 10:58:12.461654 5016 generic.go:334] "Generic (PLEG): container finished" podID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerID="d979e12b0d2ab299631c5539a33791adfbf19db79336dd57642e1e3f684c3fd0" exitCode=2 Dec 11 10:58:12 crc kubenswrapper[5016]: I1211 10:58:12.461665 5016 generic.go:334] "Generic (PLEG): container finished" podID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerID="cdbdaa99885fa4c164c5e11bd0811f5a87204d2e1341dcbb70d5dcebdd1f4366" exitCode=0 Dec 11 10:58:12 crc kubenswrapper[5016]: I1211 10:58:12.461713 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff","Type":"ContainerDied","Data":"024c16cccf7b34e632bcd625b6ce1829d7462ded8e38b4f2fd1606e27f00a92f"} Dec 11 10:58:12 crc kubenswrapper[5016]: I1211 10:58:12.461779 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff","Type":"ContainerDied","Data":"d979e12b0d2ab299631c5539a33791adfbf19db79336dd57642e1e3f684c3fd0"} Dec 11 10:58:12 crc kubenswrapper[5016]: I1211 10:58:12.461793 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff","Type":"ContainerDied","Data":"cdbdaa99885fa4c164c5e11bd0811f5a87204d2e1341dcbb70d5dcebdd1f4366"} Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.496210 5016 generic.go:334] "Generic (PLEG): container finished" podID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerID="52be5c6eadf4a7056532f8ed33f170f0a03357349c94288bc9ce8ce8d47ca89d" exitCode=0 Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.496293 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff","Type":"ContainerDied","Data":"52be5c6eadf4a7056532f8ed33f170f0a03357349c94288bc9ce8ce8d47ca89d"} Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.580198 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.643595 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-log-httpd\") pod \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.643671 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-combined-ca-bundle\") pod \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.643767 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xs76\" (UniqueName: \"kubernetes.io/projected/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-kube-api-access-2xs76\") pod \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.643842 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-scripts\") pod \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.643875 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-config-data\") pod \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.643899 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-sg-core-conf-yaml\") pod \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.643919 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-run-httpd\") pod \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\" (UID: \"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff\") " Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.644145 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" (UID: "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.644320 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" (UID: "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.644654 5016 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.644676 5016 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.652366 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-scripts" (OuterVolumeSpecName: "scripts") pod "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" (UID: "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.656578 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-kube-api-access-2xs76" (OuterVolumeSpecName: "kube-api-access-2xs76") pod "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" (UID: "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff"). InnerVolumeSpecName "kube-api-access-2xs76". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.675898 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" (UID: "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.725882 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" (UID: "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.746457 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.746498 5016 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.746515 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.746531 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xs76\" (UniqueName: \"kubernetes.io/projected/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-kube-api-access-2xs76\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.747740 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-config-data" (OuterVolumeSpecName: "config-data") pod "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" (UID: "75f2f60e-e725-4ed1-a4f5-2814e7afc3ff"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:15 crc kubenswrapper[5016]: I1211 10:58:15.848376 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.508929 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"75f2f60e-e725-4ed1-a4f5-2814e7afc3ff","Type":"ContainerDied","Data":"8b544d1fe85f04b3d46a34c7ef41d880a2d5a8242444b295bf2ad67504bb5f1e"} Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.509015 5016 scope.go:117] "RemoveContainer" containerID="024c16cccf7b34e632bcd625b6ce1829d7462ded8e38b4f2fd1606e27f00a92f" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.509048 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.545164 5016 scope.go:117] "RemoveContainer" containerID="d979e12b0d2ab299631c5539a33791adfbf19db79336dd57642e1e3f684c3fd0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.547064 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.557603 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.582984 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:58:16 crc kubenswrapper[5016]: E1211 10:58:16.583397 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="ceilometer-central-agent" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.583415 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="ceilometer-central-agent" Dec 11 10:58:16 crc kubenswrapper[5016]: E1211 10:58:16.583437 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="proxy-httpd" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.583444 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="proxy-httpd" Dec 11 10:58:16 crc kubenswrapper[5016]: E1211 10:58:16.583457 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="sg-core" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.583463 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="sg-core" Dec 11 10:58:16 crc kubenswrapper[5016]: E1211 10:58:16.583475 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="ceilometer-notification-agent" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.583481 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="ceilometer-notification-agent" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.583680 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="sg-core" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.583690 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="proxy-httpd" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.583720 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="ceilometer-central-agent" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.583735 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" containerName="ceilometer-notification-agent" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.585630 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.589758 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.590097 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.593570 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.604027 5016 scope.go:117] "RemoveContainer" containerID="cdbdaa99885fa4c164c5e11bd0811f5a87204d2e1341dcbb70d5dcebdd1f4366" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.626789 5016 scope.go:117] "RemoveContainer" containerID="52be5c6eadf4a7056532f8ed33f170f0a03357349c94288bc9ce8ce8d47ca89d" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.670347 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.670409 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-config-data\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.670435 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-scripts\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.670525 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kg6vk\" (UniqueName: \"kubernetes.io/projected/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-kube-api-access-kg6vk\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.670559 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-log-httpd\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.670589 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-run-httpd\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.670621 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 
Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.773555 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-config-data\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0"
Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.773615 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-scripts\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0"
Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.773739 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kg6vk\" (UniqueName: \"kubernetes.io/projected/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-kube-api-access-kg6vk\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0"
Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.773790 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-log-httpd\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0"
Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.773831 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-run-httpd\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0"
Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.773881 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0"
Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.773995 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0"
Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.774595 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-log-httpd\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0"
Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.775222 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-run-httpd\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0"
Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.780425 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0"
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.781737 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-config-data\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.785992 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-scripts\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.795598 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kg6vk\" (UniqueName: \"kubernetes.io/projected/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-kube-api-access-kg6vk\") pod \"ceilometer-0\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " pod="openstack/ceilometer-0" Dec 11 10:58:16 crc kubenswrapper[5016]: I1211 10:58:16.913232 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:58:17 crc kubenswrapper[5016]: I1211 10:58:17.410337 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:58:17 crc kubenswrapper[5016]: W1211 10:58:17.411228 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9fc5b5a_f1f9_4f4a_a8e1_058346e2efd3.slice/crio-2da8d07bf391eb5974b815e1bafe204b2bbb365ad5ace785c4336cc7ac37debd WatchSource:0}: Error finding container 2da8d07bf391eb5974b815e1bafe204b2bbb365ad5ace785c4336cc7ac37debd: Status 404 returned error can't find the container with id 2da8d07bf391eb5974b815e1bafe204b2bbb365ad5ace785c4336cc7ac37debd Dec 11 10:58:17 crc kubenswrapper[5016]: I1211 10:58:17.486035 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75f2f60e-e725-4ed1-a4f5-2814e7afc3ff" path="/var/lib/kubelet/pods/75f2f60e-e725-4ed1-a4f5-2814e7afc3ff/volumes" Dec 11 10:58:17 crc kubenswrapper[5016]: I1211 10:58:17.518750 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3","Type":"ContainerStarted","Data":"2da8d07bf391eb5974b815e1bafe204b2bbb365ad5ace785c4336cc7ac37debd"} Dec 11 10:58:18 crc kubenswrapper[5016]: I1211 10:58:18.531524 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3","Type":"ContainerStarted","Data":"b6905d7f0836e3e444602c7a84a215ec6e2d42ffb136b17330289e38ac964b76"} Dec 11 10:58:19 crc kubenswrapper[5016]: I1211 10:58:19.541914 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3","Type":"ContainerStarted","Data":"4f706a80693cb96eccbd72a00500d9e91f1ff7c23092b4bcb1e7e5daa3aaae92"} Dec 11 10:58:20 crc kubenswrapper[5016]: I1211 10:58:20.565100 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3","Type":"ContainerStarted","Data":"ede35febcfaff49a4c38867023ab4224323be935919d5c7670e7ac7712e3f8c4"} Dec 11 10:58:21 crc kubenswrapper[5016]: I1211 10:58:21.576194 5016 generic.go:334] "Generic (PLEG): container finished" podID="7e3e1081-5b46-471a-8978-804c54a32bc9" containerID="46bfd12debb885d0c6820a06af23ca97f3b687bc9a3be01df22bec6887d980ae" exitCode=0 Dec 11 10:58:21 crc kubenswrapper[5016]: I1211 10:58:21.576253 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-w6xjr" event={"ID":"7e3e1081-5b46-471a-8978-804c54a32bc9","Type":"ContainerDied","Data":"46bfd12debb885d0c6820a06af23ca97f3b687bc9a3be01df22bec6887d980ae"} Dec 11 10:58:22 crc kubenswrapper[5016]: I1211 10:58:22.588247 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3","Type":"ContainerStarted","Data":"271dc023c649378ab10e0f8c49f1949758bc7f03be9f32c5c5a808a0c562a0f6"} Dec 11 10:58:22 crc kubenswrapper[5016]: I1211 10:58:22.614854 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.600451445 podStartE2EDuration="6.61482901s" podCreationTimestamp="2025-12-11 10:58:16 +0000 UTC" firstStartedPulling="2025-12-11 10:58:17.413740807 +0000 UTC m=+1414.232300386" lastFinishedPulling="2025-12-11 10:58:21.428118372 +0000 UTC m=+1418.246677951" observedRunningTime="2025-12-11 10:58:22.608383514 +0000 UTC m=+1419.426943093" watchObservedRunningTime="2025-12-11 10:58:22.61482901 +0000 UTC m=+1419.433388589" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.072672 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.137651 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-combined-ca-bundle\") pod \"7e3e1081-5b46-471a-8978-804c54a32bc9\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.137721 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-config-data\") pod \"7e3e1081-5b46-471a-8978-804c54a32bc9\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.137758 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w65m8\" (UniqueName: \"kubernetes.io/projected/7e3e1081-5b46-471a-8978-804c54a32bc9-kube-api-access-w65m8\") pod \"7e3e1081-5b46-471a-8978-804c54a32bc9\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.137780 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-scripts\") pod \"7e3e1081-5b46-471a-8978-804c54a32bc9\" (UID: \"7e3e1081-5b46-471a-8978-804c54a32bc9\") " Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.154997 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-scripts" (OuterVolumeSpecName: "scripts") pod "7e3e1081-5b46-471a-8978-804c54a32bc9" (UID: 
"7e3e1081-5b46-471a-8978-804c54a32bc9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.167582 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e3e1081-5b46-471a-8978-804c54a32bc9-kube-api-access-w65m8" (OuterVolumeSpecName: "kube-api-access-w65m8") pod "7e3e1081-5b46-471a-8978-804c54a32bc9" (UID: "7e3e1081-5b46-471a-8978-804c54a32bc9"). InnerVolumeSpecName "kube-api-access-w65m8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.167963 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e3e1081-5b46-471a-8978-804c54a32bc9" (UID: "7e3e1081-5b46-471a-8978-804c54a32bc9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.173822 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-config-data" (OuterVolumeSpecName: "config-data") pod "7e3e1081-5b46-471a-8978-804c54a32bc9" (UID: "7e3e1081-5b46-471a-8978-804c54a32bc9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.240434 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.240480 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w65m8\" (UniqueName: \"kubernetes.io/projected/7e3e1081-5b46-471a-8978-804c54a32bc9-kube-api-access-w65m8\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.240508 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.240520 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3e1081-5b46-471a-8978-804c54a32bc9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.599708 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-w6xjr" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.600137 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-w6xjr" event={"ID":"7e3e1081-5b46-471a-8978-804c54a32bc9","Type":"ContainerDied","Data":"0ae128b9b061ecde234dcfdabb000d0145ce9e5e6e138bc7f3da9b6daad2e15e"} Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.600156 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ae128b9b061ecde234dcfdabb000d0145ce9e5e6e138bc7f3da9b6daad2e15e" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.600170 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.707985 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 11 10:58:23 crc kubenswrapper[5016]: E1211 10:58:23.708801 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e3e1081-5b46-471a-8978-804c54a32bc9" containerName="nova-cell0-conductor-db-sync" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.709204 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e3e1081-5b46-471a-8978-804c54a32bc9" containerName="nova-cell0-conductor-db-sync" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.709595 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e3e1081-5b46-471a-8978-804c54a32bc9" containerName="nova-cell0-conductor-db-sync" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.710601 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.713242 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.713491 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-lvnwd" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.721292 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.851702 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfdff31d-6c59-4f13-ba0c-e5791bd7fedd-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"dfdff31d-6c59-4f13-ba0c-e5791bd7fedd\") " pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.851819 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqdkj\" (UniqueName: \"kubernetes.io/projected/dfdff31d-6c59-4f13-ba0c-e5791bd7fedd-kube-api-access-dqdkj\") pod \"nova-cell0-conductor-0\" (UID: \"dfdff31d-6c59-4f13-ba0c-e5791bd7fedd\") " pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.851896 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfdff31d-6c59-4f13-ba0c-e5791bd7fedd-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"dfdff31d-6c59-4f13-ba0c-e5791bd7fedd\") " pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.953078 5016 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfdff31d-6c59-4f13-ba0c-e5791bd7fedd-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"dfdff31d-6c59-4f13-ba0c-e5791bd7fedd\") " pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.953444 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfdff31d-6c59-4f13-ba0c-e5791bd7fedd-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"dfdff31d-6c59-4f13-ba0c-e5791bd7fedd\") " pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.953562 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqdkj\" (UniqueName: \"kubernetes.io/projected/dfdff31d-6c59-4f13-ba0c-e5791bd7fedd-kube-api-access-dqdkj\") pod \"nova-cell0-conductor-0\" (UID: \"dfdff31d-6c59-4f13-ba0c-e5791bd7fedd\") " pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.964784 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfdff31d-6c59-4f13-ba0c-e5791bd7fedd-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"dfdff31d-6c59-4f13-ba0c-e5791bd7fedd\") " pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.964964 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfdff31d-6c59-4f13-ba0c-e5791bd7fedd-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"dfdff31d-6c59-4f13-ba0c-e5791bd7fedd\") " pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:23 crc kubenswrapper[5016]: I1211 10:58:23.987526 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqdkj\" (UniqueName: \"kubernetes.io/projected/dfdff31d-6c59-4f13-ba0c-e5791bd7fedd-kube-api-access-dqdkj\") pod \"nova-cell0-conductor-0\" (UID: \"dfdff31d-6c59-4f13-ba0c-e5791bd7fedd\") " pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:24 crc kubenswrapper[5016]: I1211 10:58:24.033351 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:24 crc kubenswrapper[5016]: I1211 10:58:24.533125 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 11 10:58:24 crc kubenswrapper[5016]: I1211 10:58:24.613743 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"dfdff31d-6c59-4f13-ba0c-e5791bd7fedd","Type":"ContainerStarted","Data":"15ccbcbdeb086d1b4c16b329d245f18c40b151eaf678b253dd83c581cc0af4d1"} Dec 11 10:58:24 crc kubenswrapper[5016]: I1211 10:58:24.725536 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:58:25 crc kubenswrapper[5016]: I1211 10:58:25.625334 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="ceilometer-central-agent" containerID="cri-o://b6905d7f0836e3e444602c7a84a215ec6e2d42ffb136b17330289e38ac964b76" gracePeriod=30 Dec 11 10:58:25 crc kubenswrapper[5016]: I1211 10:58:25.625867 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"dfdff31d-6c59-4f13-ba0c-e5791bd7fedd","Type":"ContainerStarted","Data":"0af1308bc5a17e5b37a5b57f4b07e0252fac22adc4d64bd8bdda882e2e636f31"} Dec 11 10:58:25 crc kubenswrapper[5016]: I1211 10:58:25.626474 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="sg-core" containerID="cri-o://ede35febcfaff49a4c38867023ab4224323be935919d5c7670e7ac7712e3f8c4" gracePeriod=30 Dec 11 10:58:25 crc kubenswrapper[5016]: I1211 10:58:25.626563 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="proxy-httpd" containerID="cri-o://271dc023c649378ab10e0f8c49f1949758bc7f03be9f32c5c5a808a0c562a0f6" gracePeriod=30 Dec 11 10:58:25 crc kubenswrapper[5016]: I1211 10:58:25.626635 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="ceilometer-notification-agent" containerID="cri-o://4f706a80693cb96eccbd72a00500d9e91f1ff7c23092b4bcb1e7e5daa3aaae92" gracePeriod=30 Dec 11 10:58:25 crc kubenswrapper[5016]: I1211 10:58:25.626797 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:25 crc kubenswrapper[5016]: I1211 10:58:25.669206 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.669183073 podStartE2EDuration="2.669183073s" podCreationTimestamp="2025-12-11 10:58:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:58:25.65867931 +0000 UTC m=+1422.477238909" watchObservedRunningTime="2025-12-11 10:58:25.669183073 +0000 UTC m=+1422.487742652" Dec 11 10:58:26 crc kubenswrapper[5016]: I1211 10:58:26.639791 5016 generic.go:334] "Generic (PLEG): container finished" podID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerID="271dc023c649378ab10e0f8c49f1949758bc7f03be9f32c5c5a808a0c562a0f6" exitCode=0 Dec 11 10:58:26 crc kubenswrapper[5016]: I1211 10:58:26.640387 5016 generic.go:334] "Generic (PLEG): container finished" podID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" 
containerID="ede35febcfaff49a4c38867023ab4224323be935919d5c7670e7ac7712e3f8c4" exitCode=2 Dec 11 10:58:26 crc kubenswrapper[5016]: I1211 10:58:26.640406 5016 generic.go:334] "Generic (PLEG): container finished" podID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerID="4f706a80693cb96eccbd72a00500d9e91f1ff7c23092b4bcb1e7e5daa3aaae92" exitCode=0 Dec 11 10:58:26 crc kubenswrapper[5016]: I1211 10:58:26.640420 5016 generic.go:334] "Generic (PLEG): container finished" podID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerID="b6905d7f0836e3e444602c7a84a215ec6e2d42ffb136b17330289e38ac964b76" exitCode=0 Dec 11 10:58:26 crc kubenswrapper[5016]: I1211 10:58:26.640020 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3","Type":"ContainerDied","Data":"271dc023c649378ab10e0f8c49f1949758bc7f03be9f32c5c5a808a0c562a0f6"} Dec 11 10:58:26 crc kubenswrapper[5016]: I1211 10:58:26.640783 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3","Type":"ContainerDied","Data":"ede35febcfaff49a4c38867023ab4224323be935919d5c7670e7ac7712e3f8c4"} Dec 11 10:58:26 crc kubenswrapper[5016]: I1211 10:58:26.640809 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3","Type":"ContainerDied","Data":"4f706a80693cb96eccbd72a00500d9e91f1ff7c23092b4bcb1e7e5daa3aaae92"} Dec 11 10:58:26 crc kubenswrapper[5016]: I1211 10:58:26.640824 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3","Type":"ContainerDied","Data":"b6905d7f0836e3e444602c7a84a215ec6e2d42ffb136b17330289e38ac964b76"} Dec 11 10:58:26 crc kubenswrapper[5016]: I1211 10:58:26.976351 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.018882 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-run-httpd\") pod \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.019003 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-scripts\") pod \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.019044 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-log-httpd\") pod \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.019092 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-sg-core-conf-yaml\") pod \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.019165 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kg6vk\" (UniqueName: \"kubernetes.io/projected/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-kube-api-access-kg6vk\") pod \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.019213 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-combined-ca-bundle\") pod \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.019330 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-config-data\") pod \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\" (UID: \"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3\") " Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.020776 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" (UID: "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.020965 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" (UID: "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.028289 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-kube-api-access-kg6vk" (OuterVolumeSpecName: "kube-api-access-kg6vk") pod "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" (UID: "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3"). InnerVolumeSpecName "kube-api-access-kg6vk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.029177 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-scripts" (OuterVolumeSpecName: "scripts") pod "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" (UID: "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.064180 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" (UID: "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.121364 5016 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.121399 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.121409 5016 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.121418 5016 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.121430 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kg6vk\" (UniqueName: \"kubernetes.io/projected/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-kube-api-access-kg6vk\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.131597 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" (UID: "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.140790 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-config-data" (OuterVolumeSpecName: "config-data") pod "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" (UID: "f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.223743 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.223793 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.651343 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3","Type":"ContainerDied","Data":"2da8d07bf391eb5974b815e1bafe204b2bbb365ad5ace785c4336cc7ac37debd"} Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.651397 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.651649 5016 scope.go:117] "RemoveContainer" containerID="271dc023c649378ab10e0f8c49f1949758bc7f03be9f32c5c5a808a0c562a0f6" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.683601 5016 scope.go:117] "RemoveContainer" containerID="ede35febcfaff49a4c38867023ab4224323be935919d5c7670e7ac7712e3f8c4" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.686227 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.699168 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.708389 5016 scope.go:117] "RemoveContainer" containerID="4f706a80693cb96eccbd72a00500d9e91f1ff7c23092b4bcb1e7e5daa3aaae92" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.714121 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:58:27 crc kubenswrapper[5016]: E1211 10:58:27.714554 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="sg-core" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.714580 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="sg-core" Dec 11 10:58:27 crc kubenswrapper[5016]: E1211 10:58:27.714601 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="ceilometer-central-agent" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.714610 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="ceilometer-central-agent" Dec 11 10:58:27 crc kubenswrapper[5016]: E1211 10:58:27.714636 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="ceilometer-notification-agent" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.714642 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="ceilometer-notification-agent" Dec 11 10:58:27 crc kubenswrapper[5016]: E1211 10:58:27.714651 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="proxy-httpd" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.714657 5016 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="proxy-httpd" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.714873 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="ceilometer-notification-agent" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.714913 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="sg-core" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.714922 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="proxy-httpd" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.714931 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" containerName="ceilometer-central-agent" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.716620 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.721194 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.721786 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.726055 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.738252 5016 scope.go:117] "RemoveContainer" containerID="b6905d7f0836e3e444602c7a84a215ec6e2d42ffb136b17330289e38ac964b76" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.841003 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g9mv\" (UniqueName: \"kubernetes.io/projected/387799aa-2f39-4fb3-8d62-225c9e3dcf47-kube-api-access-7g9mv\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.841146 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-scripts\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.841443 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/387799aa-2f39-4fb3-8d62-225c9e3dcf47-run-httpd\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.841601 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.841701 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/387799aa-2f39-4fb3-8d62-225c9e3dcf47-log-httpd\") pod \"ceilometer-0\" (UID: 
\"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.841758 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-config-data\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.841963 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.943740 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-scripts\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.943818 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/387799aa-2f39-4fb3-8d62-225c9e3dcf47-run-httpd\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.943841 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.943882 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/387799aa-2f39-4fb3-8d62-225c9e3dcf47-log-httpd\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.943910 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-config-data\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.943990 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.944065 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g9mv\" (UniqueName: \"kubernetes.io/projected/387799aa-2f39-4fb3-8d62-225c9e3dcf47-kube-api-access-7g9mv\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.946430 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/387799aa-2f39-4fb3-8d62-225c9e3dcf47-log-httpd\") pod \"ceilometer-0\" 
(UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.946987 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/387799aa-2f39-4fb3-8d62-225c9e3dcf47-run-httpd\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.950646 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-config-data\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.956748 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.956956 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.957467 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-scripts\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:27 crc kubenswrapper[5016]: I1211 10:58:27.963664 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g9mv\" (UniqueName: \"kubernetes.io/projected/387799aa-2f39-4fb3-8d62-225c9e3dcf47-kube-api-access-7g9mv\") pod \"ceilometer-0\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " pod="openstack/ceilometer-0" Dec 11 10:58:28 crc kubenswrapper[5016]: I1211 10:58:28.058434 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:58:28 crc kubenswrapper[5016]: I1211 10:58:28.534695 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 10:58:28 crc kubenswrapper[5016]: I1211 10:58:28.548175 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:58:28 crc kubenswrapper[5016]: I1211 10:58:28.665228 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"387799aa-2f39-4fb3-8d62-225c9e3dcf47","Type":"ContainerStarted","Data":"d2b22cd4d32343643c71e4c54cdff91036ac3a6d5eb35f3577ca38e79c27aaf3"} Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.060365 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.492037 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3" path="/var/lib/kubelet/pods/f9fc5b5a-f1f9-4f4a-a8e1-058346e2efd3/volumes" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.526495 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-ftrtx"] Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.528405 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.533873 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.533949 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.542609 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-ftrtx"] Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.578434 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-ftrtx\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.578606 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9k78v\" (UniqueName: \"kubernetes.io/projected/b475a2c8-d3b9-4b61-a58a-a806599c689a-kube-api-access-9k78v\") pod \"nova-cell0-cell-mapping-ftrtx\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.578729 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-config-data\") pod \"nova-cell0-cell-mapping-ftrtx\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.578779 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-scripts\") pod \"nova-cell0-cell-mapping-ftrtx\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " 
pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.680991 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-scripts\") pod \"nova-cell0-cell-mapping-ftrtx\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.681620 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-ftrtx\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.681741 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9k78v\" (UniqueName: \"kubernetes.io/projected/b475a2c8-d3b9-4b61-a58a-a806599c689a-kube-api-access-9k78v\") pod \"nova-cell0-cell-mapping-ftrtx\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.681828 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-config-data\") pod \"nova-cell0-cell-mapping-ftrtx\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.696147 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-ftrtx\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.697373 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-scripts\") pod \"nova-cell0-cell-mapping-ftrtx\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.704493 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-config-data\") pod \"nova-cell0-cell-mapping-ftrtx\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.717356 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"387799aa-2f39-4fb3-8d62-225c9e3dcf47","Type":"ContainerStarted","Data":"adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f"} Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.727671 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9k78v\" (UniqueName: \"kubernetes.io/projected/b475a2c8-d3b9-4b61-a58a-a806599c689a-kube-api-access-9k78v\") pod \"nova-cell0-cell-mapping-ftrtx\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.770496 5016 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/nova-api-0"] Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.773184 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.787741 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.822532 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.823863 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.828151 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.855540 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.859084 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.873015 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.885818 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee62f30c-abf1-4bd8-819f-5ba8d046b028-config-data\") pod \"nova-api-0\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " pod="openstack/nova-api-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.885878 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a17b46-eb85-4f0f-a6ea-9db9871f48df-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.885930 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gvzt\" (UniqueName: \"kubernetes.io/projected/ee62f30c-abf1-4bd8-819f-5ba8d046b028-kube-api-access-6gvzt\") pod \"nova-api-0\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " pod="openstack/nova-api-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.885982 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnf5w\" (UniqueName: \"kubernetes.io/projected/97a17b46-eb85-4f0f-a6ea-9db9871f48df-kube-api-access-nnf5w\") pod \"nova-cell1-novncproxy-0\" (UID: \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.886052 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee62f30c-abf1-4bd8-819f-5ba8d046b028-logs\") pod \"nova-api-0\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " pod="openstack/nova-api-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.886091 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a17b46-eb85-4f0f-a6ea-9db9871f48df-combined-ca-bundle\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.886175 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee62f30c-abf1-4bd8-819f-5ba8d046b028-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " pod="openstack/nova-api-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.950931 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.956318 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.960309 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.991061 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee62f30c-abf1-4bd8-819f-5ba8d046b028-logs\") pod \"nova-api-0\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " pod="openstack/nova-api-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.991136 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a17b46-eb85-4f0f-a6ea-9db9871f48df-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.991188 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee62f30c-abf1-4bd8-819f-5ba8d046b028-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " pod="openstack/nova-api-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.991232 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83572e5b-69fa-4064-8b2f-cd1653a28754-config-data\") pod \"nova-scheduler-0\" (UID: \"83572e5b-69fa-4064-8b2f-cd1653a28754\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.991284 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee62f30c-abf1-4bd8-819f-5ba8d046b028-config-data\") pod \"nova-api-0\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " pod="openstack/nova-api-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.991312 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a17b46-eb85-4f0f-a6ea-9db9871f48df-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.991360 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83572e5b-69fa-4064-8b2f-cd1653a28754-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"83572e5b-69fa-4064-8b2f-cd1653a28754\") " pod="openstack/nova-scheduler-0" Dec 11 
10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.991395 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gvzt\" (UniqueName: \"kubernetes.io/projected/ee62f30c-abf1-4bd8-819f-5ba8d046b028-kube-api-access-6gvzt\") pod \"nova-api-0\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " pod="openstack/nova-api-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.991436 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnf5w\" (UniqueName: \"kubernetes.io/projected/97a17b46-eb85-4f0f-a6ea-9db9871f48df-kube-api-access-nnf5w\") pod \"nova-cell1-novncproxy-0\" (UID: \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.991462 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmtxh\" (UniqueName: \"kubernetes.io/projected/83572e5b-69fa-4064-8b2f-cd1653a28754-kube-api-access-xmtxh\") pod \"nova-scheduler-0\" (UID: \"83572e5b-69fa-4064-8b2f-cd1653a28754\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:29 crc kubenswrapper[5016]: I1211 10:58:29.992100 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee62f30c-abf1-4bd8-819f-5ba8d046b028-logs\") pod \"nova-api-0\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " pod="openstack/nova-api-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:29.999834 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.004767 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee62f30c-abf1-4bd8-819f-5ba8d046b028-config-data\") pod \"nova-api-0\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " pod="openstack/nova-api-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.011778 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee62f30c-abf1-4bd8-819f-5ba8d046b028-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " pod="openstack/nova-api-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.015704 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a17b46-eb85-4f0f-a6ea-9db9871f48df-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.032168 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a17b46-eb85-4f0f-a6ea-9db9871f48df-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.041362 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnf5w\" (UniqueName: \"kubernetes.io/projected/97a17b46-eb85-4f0f-a6ea-9db9871f48df-kube-api-access-nnf5w\") pod \"nova-cell1-novncproxy-0\" (UID: \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.041765 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.048205 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.051852 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.058543 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gvzt\" (UniqueName: \"kubernetes.io/projected/ee62f30c-abf1-4bd8-819f-5ba8d046b028-kube-api-access-6gvzt\") pod \"nova-api-0\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " pod="openstack/nova-api-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.083103 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.093115 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqzch\" (UniqueName: \"kubernetes.io/projected/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-kube-api-access-kqzch\") pod \"nova-metadata-0\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.093201 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83572e5b-69fa-4064-8b2f-cd1653a28754-config-data\") pod \"nova-scheduler-0\" (UID: \"83572e5b-69fa-4064-8b2f-cd1653a28754\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.093257 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-config-data\") pod \"nova-metadata-0\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.093280 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-logs\") pod \"nova-metadata-0\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.093309 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83572e5b-69fa-4064-8b2f-cd1653a28754-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"83572e5b-69fa-4064-8b2f-cd1653a28754\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.093347 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmtxh\" (UniqueName: \"kubernetes.io/projected/83572e5b-69fa-4064-8b2f-cd1653a28754-kube-api-access-xmtxh\") pod \"nova-scheduler-0\" (UID: \"83572e5b-69fa-4064-8b2f-cd1653a28754\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.093376 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: 
I1211 10:58:30.110227 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83572e5b-69fa-4064-8b2f-cd1653a28754-config-data\") pod \"nova-scheduler-0\" (UID: \"83572e5b-69fa-4064-8b2f-cd1653a28754\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.116804 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83572e5b-69fa-4064-8b2f-cd1653a28754-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"83572e5b-69fa-4064-8b2f-cd1653a28754\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.125853 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmtxh\" (UniqueName: \"kubernetes.io/projected/83572e5b-69fa-4064-8b2f-cd1653a28754-kube-api-access-xmtxh\") pod \"nova-scheduler-0\" (UID: \"83572e5b-69fa-4064-8b2f-cd1653a28754\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.155639 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.159882 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-5p6nv"] Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.162389 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.177025 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-5p6nv"] Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.214221 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.260683 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.260777 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.260862 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.260913 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqzch\" (UniqueName: \"kubernetes.io/projected/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-kube-api-access-kqzch\") pod \"nova-metadata-0\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.261044 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5l9q\" (UniqueName: \"kubernetes.io/projected/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-kube-api-access-z5l9q\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.268446 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-config-data\") pod \"nova-metadata-0\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.268514 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-logs\") pod \"nova-metadata-0\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.268553 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-config\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.269805 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-logs\") pod \"nova-metadata-0\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: 
I1211 10:58:30.270045 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-dns-svc\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.270113 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.295699 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqzch\" (UniqueName: \"kubernetes.io/projected/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-kube-api-access-kqzch\") pod \"nova-metadata-0\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.300286 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-config-data\") pod \"nova-metadata-0\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.305158 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.322578 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.385514 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-config\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.385585 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-dns-svc\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.385649 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.385667 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.385694 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.385734 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5l9q\" (UniqueName: \"kubernetes.io/projected/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-kube-api-access-z5l9q\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.387118 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-config\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.387503 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-dns-svc\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.388861 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.389663 5016 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.390265 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.390847 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.414825 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5l9q\" (UniqueName: \"kubernetes.io/projected/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-kube-api-access-z5l9q\") pod \"dnsmasq-dns-bccf8f775-5p6nv\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.462038 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.701335 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-ftrtx"] Dec 11 10:58:30 crc kubenswrapper[5016]: W1211 10:58:30.733565 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb475a2c8_d3b9_4b61_a58a_a806599c689a.slice/crio-05f2279ea625085be64d080a7ca4ddd65dd3cb4818b111924dc518e61300f127 WatchSource:0}: Error finding container 05f2279ea625085be64d080a7ca4ddd65dd3cb4818b111924dc518e61300f127: Status 404 returned error can't find the container with id 05f2279ea625085be64d080a7ca4ddd65dd3cb4818b111924dc518e61300f127 Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.797992 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:58:30 crc kubenswrapper[5016]: W1211 10:58:30.810242 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee62f30c_abf1_4bd8_819f_5ba8d046b028.slice/crio-9ec169caa6f93a5756da45733284e58896a533a49945bfa789e7e3710fcf3599 WatchSource:0}: Error finding container 9ec169caa6f93a5756da45733284e58896a533a49945bfa789e7e3710fcf3599: Status 404 returned error can't find the container with id 9ec169caa6f93a5756da45733284e58896a533a49945bfa789e7e3710fcf3599 Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.968619 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-d5pmm"] Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.973392 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.977702 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.978012 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 11 10:58:30 crc kubenswrapper[5016]: I1211 10:58:30.989470 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-d5pmm"] Dec 11 10:58:30 crc kubenswrapper[5016]: W1211 10:58:30.996073 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83572e5b_69fa_4064_8b2f_cd1653a28754.slice/crio-5dd13aa0adc614425e2cc24f4e835054af41b5ccf960548b4812cc9a48d7b06d WatchSource:0}: Error finding container 5dd13aa0adc614425e2cc24f4e835054af41b5ccf960548b4812cc9a48d7b06d: Status 404 returned error can't find the container with id 5dd13aa0adc614425e2cc24f4e835054af41b5ccf960548b4812cc9a48d7b06d Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.010647 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.021988 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.118219 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95rcn\" (UniqueName: \"kubernetes.io/projected/80b378da-6397-4b63-8eb4-3a2a465e6425-kube-api-access-95rcn\") pod \"nova-cell1-conductor-db-sync-d5pmm\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.118628 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-d5pmm\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.118750 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-scripts\") pod \"nova-cell1-conductor-db-sync-d5pmm\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.118973 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-config-data\") pod \"nova-cell1-conductor-db-sync-d5pmm\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.220987 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-scripts\") pod \"nova-cell1-conductor-db-sync-d5pmm\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 
10:58:31.221085 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-config-data\") pod \"nova-cell1-conductor-db-sync-d5pmm\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.221156 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95rcn\" (UniqueName: \"kubernetes.io/projected/80b378da-6397-4b63-8eb4-3a2a465e6425-kube-api-access-95rcn\") pod \"nova-cell1-conductor-db-sync-d5pmm\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.221321 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-d5pmm\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.226554 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-d5pmm\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.226687 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-config-data\") pod \"nova-cell1-conductor-db-sync-d5pmm\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.238881 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-scripts\") pod \"nova-cell1-conductor-db-sync-d5pmm\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.244877 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95rcn\" (UniqueName: \"kubernetes.io/projected/80b378da-6397-4b63-8eb4-3a2a465e6425-kube-api-access-95rcn\") pod \"nova-cell1-conductor-db-sync-d5pmm\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.287179 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-5p6nv"] Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.296640 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-d5pmm"
Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.300201 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.742007 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" event={"ID":"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3","Type":"ContainerStarted","Data":"364a07a91b998989a8cfb841a021971dcad2c28535860046e059f1325c41d79e"}
Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.744074 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-ftrtx" event={"ID":"b475a2c8-d3b9-4b61-a58a-a806599c689a","Type":"ContainerStarted","Data":"a7d200cf5638600ce4b49d4c48c3f919d152702a0254d8dee213992fe20f91b9"}
Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.744111 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-ftrtx" event={"ID":"b475a2c8-d3b9-4b61-a58a-a806599c689a","Type":"ContainerStarted","Data":"05f2279ea625085be64d080a7ca4ddd65dd3cb4818b111924dc518e61300f127"}
Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.746870 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83572e5b-69fa-4064-8b2f-cd1653a28754","Type":"ContainerStarted","Data":"5dd13aa0adc614425e2cc24f4e835054af41b5ccf960548b4812cc9a48d7b06d"}
Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.749295 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27","Type":"ContainerStarted","Data":"56444b31db778d77b7241563d3108c4ae322ce7b1d7371f1dc685128e73be880"}
Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.752116 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"97a17b46-eb85-4f0f-a6ea-9db9871f48df","Type":"ContainerStarted","Data":"98d2ecf8613fe31b2b5ebbba30fa514c1e93c2ddfa8a5c71c4a3e198689e1b79"}
Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.754170 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee62f30c-abf1-4bd8-819f-5ba8d046b028","Type":"ContainerStarted","Data":"9ec169caa6f93a5756da45733284e58896a533a49945bfa789e7e3710fcf3599"}
Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.773157 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-ftrtx" podStartSLOduration=2.773131157 podStartE2EDuration="2.773131157s" podCreationTimestamp="2025-12-11 10:58:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:58:31.765990764 +0000 UTC m=+1428.584550353" watchObservedRunningTime="2025-12-11 10:58:31.773131157 +0000 UTC m=+1428.591690746"
Dec 11 10:58:31 crc kubenswrapper[5016]: I1211 10:58:31.818431 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-d5pmm"]
Dec 11 10:58:32 crc kubenswrapper[5016]: I1211 10:58:32.776636 5016 generic.go:334] "Generic (PLEG): container finished" podID="8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" containerID="2e22ea3e2379155e21a3130edd82887a257bfd53b1cd64da60a01764d1f430bd" exitCode=0
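The pod_startup_latency_tracker entry above for nova-cell0-cell-mapping-ftrtx shows how its two durations relate: with no image pull (both pulling timestamps are the zero time), podStartSLOduration is simply watchObservedRunningTime minus podCreationTimestamp; for a pod that did pull an image, such as nova-scheduler-0 further down, the pull window is additionally excluded from the SLO figure. The snippet below reproduces both numbers from timestamps copied out of this log (expressed as seconds past 10:58:00); it is a sanity check of what the fields mean as observed here, not kubelet source.

package main

import "fmt"

func main() {
	// nova-cell0-cell-mapping-ftrtx: nothing pulled, so SLO == E2E.
	fmt.Printf("%.9f\n", 31.773131157-29.0) // 2.773131157 == podStartSLOduration

	// nova-scheduler-0: the image-pull window is excluded from the SLO figure.
	e2e := 36.865468976 - 29.0          // 7.865468976s == podStartE2EDuration
	pull := 36.182209089 - 31.000116105 // firstStartedPulling -> lastFinishedPulling
	fmt.Printf("%.9f\n", e2e-pull)      // 2.683375992 == podStartSLOduration
}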
event={"ID":"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3","Type":"ContainerDied","Data":"2e22ea3e2379155e21a3130edd82887a257bfd53b1cd64da60a01764d1f430bd"} Dec 11 10:58:32 crc kubenswrapper[5016]: I1211 10:58:32.781988 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-d5pmm" event={"ID":"80b378da-6397-4b63-8eb4-3a2a465e6425","Type":"ContainerStarted","Data":"b38178e5c54cc99963dad2af4beb7cc0015ee856d538a7f49d2b020adb5b4709"} Dec 11 10:58:32 crc kubenswrapper[5016]: I1211 10:58:32.782051 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-d5pmm" event={"ID":"80b378da-6397-4b63-8eb4-3a2a465e6425","Type":"ContainerStarted","Data":"1fc6eb7d045ac479473a51cd688569b83f8daa105c424edf8ad32c9b873e29fe"} Dec 11 10:58:32 crc kubenswrapper[5016]: I1211 10:58:32.847081 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-d5pmm" podStartSLOduration=2.847056324 podStartE2EDuration="2.847056324s" podCreationTimestamp="2025-12-11 10:58:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:58:32.837082512 +0000 UTC m=+1429.655642101" watchObservedRunningTime="2025-12-11 10:58:32.847056324 +0000 UTC m=+1429.665615913" Dec 11 10:58:33 crc kubenswrapper[5016]: I1211 10:58:33.514606 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:58:33 crc kubenswrapper[5016]: I1211 10:58:33.518778 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 10:58:33 crc kubenswrapper[5016]: I1211 10:58:33.795559 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" event={"ID":"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3","Type":"ContainerStarted","Data":"715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b"} Dec 11 10:58:33 crc kubenswrapper[5016]: I1211 10:58:33.796104 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:58:33 crc kubenswrapper[5016]: I1211 10:58:33.814892 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" podStartSLOduration=3.814863352 podStartE2EDuration="3.814863352s" podCreationTimestamp="2025-12-11 10:58:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:58:33.814192885 +0000 UTC m=+1430.632752484" watchObservedRunningTime="2025-12-11 10:58:33.814863352 +0000 UTC m=+1430.633422961" Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.827399 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"387799aa-2f39-4fb3-8d62-225c9e3dcf47","Type":"ContainerStarted","Data":"7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610"} Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.830732 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83572e5b-69fa-4064-8b2f-cd1653a28754","Type":"ContainerStarted","Data":"53f61a25aa2d06bbe25ea6601eace5a58ae621720ec78c0d106b9b555898ebd1"} Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.833988 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27","Type":"ContainerStarted","Data":"8101fffa9dc16ab88014bd0f7a4f041d13bb050825e8529dfd12a4d80a357a5c"} Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.834198 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27","Type":"ContainerStarted","Data":"9c9604449c8ccf4d833a48163613ecce9140a69b7581c202587de7e1985abbdb"} Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.834054 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" containerName="nova-metadata-log" containerID="cri-o://9c9604449c8ccf4d833a48163613ecce9140a69b7581c202587de7e1985abbdb" gracePeriod=30 Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.834344 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" containerName="nova-metadata-metadata" containerID="cri-o://8101fffa9dc16ab88014bd0f7a4f041d13bb050825e8529dfd12a4d80a357a5c" gracePeriod=30 Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.845128 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"97a17b46-eb85-4f0f-a6ea-9db9871f48df","Type":"ContainerStarted","Data":"c80f9b2b2b6fbafeb311b1508cda8a626cdeb1473cef03e42f3c3343617d0bab"} Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.845349 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="97a17b46-eb85-4f0f-a6ea-9db9871f48df" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://c80f9b2b2b6fbafeb311b1508cda8a626cdeb1473cef03e42f3c3343617d0bab" gracePeriod=30 Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.856199 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee62f30c-abf1-4bd8-819f-5ba8d046b028","Type":"ContainerStarted","Data":"8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0"} Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.856364 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee62f30c-abf1-4bd8-819f-5ba8d046b028","Type":"ContainerStarted","Data":"96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b"} Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.865491 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.683375992 podStartE2EDuration="7.865468976s" podCreationTimestamp="2025-12-11 10:58:29 +0000 UTC" firstStartedPulling="2025-12-11 10:58:31.000116105 +0000 UTC m=+1427.818675684" lastFinishedPulling="2025-12-11 10:58:36.182209089 +0000 UTC m=+1433.000768668" observedRunningTime="2025-12-11 10:58:36.855915025 +0000 UTC m=+1433.674474604" watchObservedRunningTime="2025-12-11 10:58:36.865468976 +0000 UTC m=+1433.684028565" Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.891471 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.174581727 podStartE2EDuration="7.891438501s" podCreationTimestamp="2025-12-11 10:58:29 +0000 UTC" firstStartedPulling="2025-12-11 10:58:31.288045029 +0000 UTC m=+1428.106604608" lastFinishedPulling="2025-12-11 10:58:36.004901803 +0000 UTC m=+1432.823461382" observedRunningTime="2025-12-11 10:58:36.881258436 
+0000 UTC m=+1433.699818035" watchObservedRunningTime="2025-12-11 10:58:36.891438501 +0000 UTC m=+1433.709998100"
Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.909810 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.916835262 podStartE2EDuration="7.909785414s" podCreationTimestamp="2025-12-11 10:58:29 +0000 UTC" firstStartedPulling="2025-12-11 10:58:31.011823868 +0000 UTC m=+1427.830383447" lastFinishedPulling="2025-12-11 10:58:36.00477402 +0000 UTC m=+1432.823333599" observedRunningTime="2025-12-11 10:58:36.903729708 +0000 UTC m=+1433.722289307" watchObservedRunningTime="2025-12-11 10:58:36.909785414 +0000 UTC m=+1433.728345003"
Dec 11 10:58:36 crc kubenswrapper[5016]: I1211 10:58:36.954128 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.7354824989999997 podStartE2EDuration="7.954106743s" podCreationTimestamp="2025-12-11 10:58:29 +0000 UTC" firstStartedPulling="2025-12-11 10:58:30.814810037 +0000 UTC m=+1427.633369606" lastFinishedPulling="2025-12-11 10:58:36.033434271 +0000 UTC m=+1432.851993850" observedRunningTime="2025-12-11 10:58:36.940190597 +0000 UTC m=+1433.758750176" watchObservedRunningTime="2025-12-11 10:58:36.954106743 +0000 UTC m=+1433.772666332"
Dec 11 10:58:37 crc kubenswrapper[5016]: I1211 10:58:37.872181 5016 generic.go:334] "Generic (PLEG): container finished" podID="13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" containerID="9c9604449c8ccf4d833a48163613ecce9140a69b7581c202587de7e1985abbdb" exitCode=143
Dec 11 10:58:37 crc kubenswrapper[5016]: I1211 10:58:37.873395 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27","Type":"ContainerDied","Data":"9c9604449c8ccf4d833a48163613ecce9140a69b7581c202587de7e1985abbdb"}
Dec 11 10:58:39 crc kubenswrapper[5016]: I1211 10:58:39.891264 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"387799aa-2f39-4fb3-8d62-225c9e3dcf47","Type":"ContainerStarted","Data":"1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b"}
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.156750 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.156822 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.217379 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.326984 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.327088 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.369862 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.391684 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.391760 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
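The exitCode=143 reported above for the nova-metadata-log container is 128 plus SIGTERM (15): the container died from the signal delivered by the grace-period kill issued at 10:58:36 (gracePeriod=30). By contrast, a clean shutdown such as the dnsmasq-dns container below reports exitCode=0, and a container that outlived its grace period and was SIGKILLed would report 137. A two-line check of that mapping (assumes a Unix platform for the syscall constants):

package main

import (
	"fmt"
	"syscall"
)

func main() {
	fmt.Println(128 + int(syscall.SIGTERM)) // 143, matches nova-metadata-log above
	fmt.Println(128 + int(syscall.SIGKILL)) // 137, what a grace-period timeout would report
}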
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.464218 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv"
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.570864 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-9xt82"]
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.571168 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" podUID="a1e43ec9-f406-4d7b-8928-3e3ae504973d" containerName="dnsmasq-dns" containerID="cri-o://629aa68ce39880cf13d5df3823e1ed3793c73ed76ea74abecd6b3c44e97522d4" gracePeriod=10
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.923658 5016 generic.go:334] "Generic (PLEG): container finished" podID="a1e43ec9-f406-4d7b-8928-3e3ae504973d" containerID="629aa68ce39880cf13d5df3823e1ed3793c73ed76ea74abecd6b3c44e97522d4" exitCode=0
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.923751 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" event={"ID":"a1e43ec9-f406-4d7b-8928-3e3ae504973d","Type":"ContainerDied","Data":"629aa68ce39880cf13d5df3823e1ed3793c73ed76ea74abecd6b3c44e97522d4"}
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.935998 5016 generic.go:334] "Generic (PLEG): container finished" podID="b475a2c8-d3b9-4b61-a58a-a806599c689a" containerID="a7d200cf5638600ce4b49d4c48c3f919d152702a0254d8dee213992fe20f91b9" exitCode=0
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.936098 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-ftrtx" event={"ID":"b475a2c8-d3b9-4b61-a58a-a806599c689a","Type":"ContainerDied","Data":"a7d200cf5638600ce4b49d4c48c3f919d152702a0254d8dee213992fe20f91b9"}
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.950699 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"387799aa-2f39-4fb3-8d62-225c9e3dcf47","Type":"ContainerStarted","Data":"24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7"}
Dec 11 10:58:40 crc kubenswrapper[5016]: I1211 10:58:40.950834 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.029923 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.389603578 podStartE2EDuration="14.029899308s" podCreationTimestamp="2025-12-11 10:58:27 +0000 UTC" firstStartedPulling="2025-12-11 10:58:28.534123269 +0000 UTC m=+1425.352682848" lastFinishedPulling="2025-12-11 10:58:40.174418999 +0000 UTC m=+1436.992978578" observedRunningTime="2025-12-11 10:58:40.993565142 +0000 UTC m=+1437.812124721" watchObservedRunningTime="2025-12-11 10:58:41.029899308 +0000 UTC m=+1437.848458887"
Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.055298 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.239195 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.182:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.239257 5016 prober.go:107] "Probe failed" probeType="Startup"
pod="openstack/nova-api-0" podUID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.182:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.292535 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.404134 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-ovsdbserver-sb\") pod \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.404489 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-dns-swift-storage-0\") pod \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.404519 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-dns-svc\") pod \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.404555 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-config\") pod \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.404581 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-ovsdbserver-nb\") pod \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.404612 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xk949\" (UniqueName: \"kubernetes.io/projected/a1e43ec9-f406-4d7b-8928-3e3ae504973d-kube-api-access-xk949\") pod \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\" (UID: \"a1e43ec9-f406-4d7b-8928-3e3ae504973d\") " Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.414174 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1e43ec9-f406-4d7b-8928-3e3ae504973d-kube-api-access-xk949" (OuterVolumeSpecName: "kube-api-access-xk949") pod "a1e43ec9-f406-4d7b-8928-3e3ae504973d" (UID: "a1e43ec9-f406-4d7b-8928-3e3ae504973d"). InnerVolumeSpecName "kube-api-access-xk949". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.467355 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a1e43ec9-f406-4d7b-8928-3e3ae504973d" (UID: "a1e43ec9-f406-4d7b-8928-3e3ae504973d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.467549 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a1e43ec9-f406-4d7b-8928-3e3ae504973d" (UID: "a1e43ec9-f406-4d7b-8928-3e3ae504973d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.487552 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-config" (OuterVolumeSpecName: "config") pod "a1e43ec9-f406-4d7b-8928-3e3ae504973d" (UID: "a1e43ec9-f406-4d7b-8928-3e3ae504973d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.496477 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a1e43ec9-f406-4d7b-8928-3e3ae504973d" (UID: "a1e43ec9-f406-4d7b-8928-3e3ae504973d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.507378 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.507420 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.507434 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xk949\" (UniqueName: \"kubernetes.io/projected/a1e43ec9-f406-4d7b-8928-3e3ae504973d-kube-api-access-xk949\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.507444 5016 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.507454 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.516608 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a1e43ec9-f406-4d7b-8928-3e3ae504973d" (UID: "a1e43ec9-f406-4d7b-8928-3e3ae504973d"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.609502 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1e43ec9-f406-4d7b-8928-3e3ae504973d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.980991 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" event={"ID":"a1e43ec9-f406-4d7b-8928-3e3ae504973d","Type":"ContainerDied","Data":"cb0ab21bfb56a0ea1604dbe07204d76232d23b46a803378a3ceeb26d67cc005d"} Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.981083 5016 scope.go:117] "RemoveContainer" containerID="629aa68ce39880cf13d5df3823e1ed3793c73ed76ea74abecd6b3c44e97522d4" Dec 11 10:58:41 crc kubenswrapper[5016]: I1211 10:58:41.982071 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-9xt82" Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.017733 5016 scope.go:117] "RemoveContainer" containerID="5d80ddf340a63b1965af906c66d29de8220f43794b62ae462a8059ef41368671" Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.028273 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-9xt82"] Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.045931 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-9xt82"] Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.407909 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-ftrtx" Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.535259 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-config-data\") pod \"b475a2c8-d3b9-4b61-a58a-a806599c689a\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.535579 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9k78v\" (UniqueName: \"kubernetes.io/projected/b475a2c8-d3b9-4b61-a58a-a806599c689a-kube-api-access-9k78v\") pod \"b475a2c8-d3b9-4b61-a58a-a806599c689a\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.535648 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-scripts\") pod \"b475a2c8-d3b9-4b61-a58a-a806599c689a\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.535715 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-combined-ca-bundle\") pod \"b475a2c8-d3b9-4b61-a58a-a806599c689a\" (UID: \"b475a2c8-d3b9-4b61-a58a-a806599c689a\") " Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.571050 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b475a2c8-d3b9-4b61-a58a-a806599c689a-kube-api-access-9k78v" (OuterVolumeSpecName: "kube-api-access-9k78v") pod "b475a2c8-d3b9-4b61-a58a-a806599c689a" (UID: "b475a2c8-d3b9-4b61-a58a-a806599c689a"). InnerVolumeSpecName "kube-api-access-9k78v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.574127 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-scripts" (OuterVolumeSpecName: "scripts") pod "b475a2c8-d3b9-4b61-a58a-a806599c689a" (UID: "b475a2c8-d3b9-4b61-a58a-a806599c689a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.584172 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-config-data" (OuterVolumeSpecName: "config-data") pod "b475a2c8-d3b9-4b61-a58a-a806599c689a" (UID: "b475a2c8-d3b9-4b61-a58a-a806599c689a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.585179 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b475a2c8-d3b9-4b61-a58a-a806599c689a" (UID: "b475a2c8-d3b9-4b61-a58a-a806599c689a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.639412 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.639460 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9k78v\" (UniqueName: \"kubernetes.io/projected/b475a2c8-d3b9-4b61-a58a-a806599c689a-kube-api-access-9k78v\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.639472 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.639485 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b475a2c8-d3b9-4b61-a58a-a806599c689a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.932826 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.933174 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.991255 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-ftrtx" event={"ID":"b475a2c8-d3b9-4b61-a58a-a806599c689a","Type":"ContainerDied","Data":"05f2279ea625085be64d080a7ca4ddd65dd3cb4818b111924dc518e61300f127"} Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.991300 5016 
Dec 11 10:58:42 crc kubenswrapper[5016]: I1211 10:58:42.991653 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-ftrtx"
Dec 11 10:58:43 crc kubenswrapper[5016]: I1211 10:58:43.156624 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Dec 11 10:58:43 crc kubenswrapper[5016]: I1211 10:58:43.156888 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" containerName="nova-api-log" containerID="cri-o://96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b" gracePeriod=30
Dec 11 10:58:43 crc kubenswrapper[5016]: I1211 10:58:43.156998 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" containerName="nova-api-api" containerID="cri-o://8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0" gracePeriod=30
Dec 11 10:58:43 crc kubenswrapper[5016]: I1211 10:58:43.179327 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 11 10:58:43 crc kubenswrapper[5016]: I1211 10:58:43.179683 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="83572e5b-69fa-4064-8b2f-cd1653a28754" containerName="nova-scheduler-scheduler" containerID="cri-o://53f61a25aa2d06bbe25ea6601eace5a58ae621720ec78c0d106b9b555898ebd1" gracePeriod=30
Dec 11 10:58:43 crc kubenswrapper[5016]: I1211 10:58:43.516975 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1e43ec9-f406-4d7b-8928-3e3ae504973d" path="/var/lib/kubelet/pods/a1e43ec9-f406-4d7b-8928-3e3ae504973d/volumes"
Dec 11 10:58:44 crc kubenswrapper[5016]: I1211 10:58:44.004493 5016 generic.go:334] "Generic (PLEG): container finished" podID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" containerID="96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b" exitCode=143
Dec 11 10:58:44 crc kubenswrapper[5016]: I1211 10:58:44.004589 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee62f30c-abf1-4bd8-819f-5ba8d046b028","Type":"ContainerDied","Data":"96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b"}
Dec 11 10:58:45 crc kubenswrapper[5016]: I1211 10:58:45.016225 5016 generic.go:334] "Generic (PLEG): container finished" podID="83572e5b-69fa-4064-8b2f-cd1653a28754" containerID="53f61a25aa2d06bbe25ea6601eace5a58ae621720ec78c0d106b9b555898ebd1" exitCode=0
Dec 11 10:58:45 crc kubenswrapper[5016]: I1211 10:58:45.016281 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83572e5b-69fa-4064-8b2f-cd1653a28754","Type":"ContainerDied","Data":"53f61a25aa2d06bbe25ea6601eace5a58ae621720ec78c0d106b9b555898ebd1"}
Dec 11 10:58:45 crc kubenswrapper[5016]: E1211 10:58:45.327698 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53f61a25aa2d06bbe25ea6601eace5a58ae621720ec78c0d106b9b555898ebd1 is running failed: container process not found" containerID="53f61a25aa2d06bbe25ea6601eace5a58ae621720ec78c0d106b9b555898ebd1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Dec 11 10:58:45 crc kubenswrapper[5016]: E1211 10:58:45.328627 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53f61a25aa2d06bbe25ea6601eace5a58ae621720ec78c0d106b9b555898ebd1 is running failed: container process not found" containerID="53f61a25aa2d06bbe25ea6601eace5a58ae621720ec78c0d106b9b555898ebd1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Dec 11 10:58:45 crc kubenswrapper[5016]: E1211 10:58:45.329219 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53f61a25aa2d06bbe25ea6601eace5a58ae621720ec78c0d106b9b555898ebd1 is running failed: container process not found" containerID="53f61a25aa2d06bbe25ea6601eace5a58ae621720ec78c0d106b9b555898ebd1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Dec 11 10:58:45 crc kubenswrapper[5016]: E1211 10:58:45.329287 5016 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53f61a25aa2d06bbe25ea6601eace5a58ae621720ec78c0d106b9b555898ebd1 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="83572e5b-69fa-4064-8b2f-cd1653a28754" containerName="nova-scheduler-scheduler"
Dec 11 10:58:45 crc kubenswrapper[5016]: I1211 10:58:45.479915 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 11 10:58:45 crc kubenswrapper[5016]: I1211 10:58:45.646515 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmtxh\" (UniqueName: \"kubernetes.io/projected/83572e5b-69fa-4064-8b2f-cd1653a28754-kube-api-access-xmtxh\") pod \"83572e5b-69fa-4064-8b2f-cd1653a28754\" (UID: \"83572e5b-69fa-4064-8b2f-cd1653a28754\") "
Dec 11 10:58:45 crc kubenswrapper[5016]: I1211 10:58:45.646606 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83572e5b-69fa-4064-8b2f-cd1653a28754-combined-ca-bundle\") pod \"83572e5b-69fa-4064-8b2f-cd1653a28754\" (UID: \"83572e5b-69fa-4064-8b2f-cd1653a28754\") "
Dec 11 10:58:45 crc kubenswrapper[5016]: I1211 10:58:45.646644 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83572e5b-69fa-4064-8b2f-cd1653a28754-config-data\") pod \"83572e5b-69fa-4064-8b2f-cd1653a28754\" (UID: \"83572e5b-69fa-4064-8b2f-cd1653a28754\") "
Dec 11 10:58:45 crc kubenswrapper[5016]: I1211 10:58:45.660161 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83572e5b-69fa-4064-8b2f-cd1653a28754-kube-api-access-xmtxh" (OuterVolumeSpecName: "kube-api-access-xmtxh") pod "83572e5b-69fa-4064-8b2f-cd1653a28754" (UID: "83572e5b-69fa-4064-8b2f-cd1653a28754"). InnerVolumeSpecName "kube-api-access-xmtxh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:58:45 crc kubenswrapper[5016]: I1211 10:58:45.683038 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83572e5b-69fa-4064-8b2f-cd1653a28754-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83572e5b-69fa-4064-8b2f-cd1653a28754" (UID: "83572e5b-69fa-4064-8b2f-cd1653a28754"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:58:45 crc kubenswrapper[5016]: I1211 10:58:45.695839 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83572e5b-69fa-4064-8b2f-cd1653a28754-config-data" (OuterVolumeSpecName: "config-data") pod "83572e5b-69fa-4064-8b2f-cd1653a28754" (UID: "83572e5b-69fa-4064-8b2f-cd1653a28754"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 10:58:45 crc kubenswrapper[5016]: I1211 10:58:45.748995 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmtxh\" (UniqueName: \"kubernetes.io/projected/83572e5b-69fa-4064-8b2f-cd1653a28754-kube-api-access-xmtxh\") on node \"crc\" DevicePath \"\""
Dec 11 10:58:45 crc kubenswrapper[5016]: I1211 10:58:45.749040 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83572e5b-69fa-4064-8b2f-cd1653a28754-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 11 10:58:45 crc kubenswrapper[5016]: I1211 10:58:45.749056 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83572e5b-69fa-4064-8b2f-cd1653a28754-config-data\") on node \"crc\" DevicePath \"\""
Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.028479 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"83572e5b-69fa-4064-8b2f-cd1653a28754","Type":"ContainerDied","Data":"5dd13aa0adc614425e2cc24f4e835054af41b5ccf960548b4812cc9a48d7b06d"}
Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.028843 5016 scope.go:117] "RemoveContainer" containerID="53f61a25aa2d06bbe25ea6601eace5a58ae621720ec78c0d106b9b555898ebd1"
Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.028595 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.081137 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.096222 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.106694 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:58:46 crc kubenswrapper[5016]: E1211 10:58:46.107459 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83572e5b-69fa-4064-8b2f-cd1653a28754" containerName="nova-scheduler-scheduler" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.107482 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="83572e5b-69fa-4064-8b2f-cd1653a28754" containerName="nova-scheduler-scheduler" Dec 11 10:58:46 crc kubenswrapper[5016]: E1211 10:58:46.107509 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b475a2c8-d3b9-4b61-a58a-a806599c689a" containerName="nova-manage" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.107518 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="b475a2c8-d3b9-4b61-a58a-a806599c689a" containerName="nova-manage" Dec 11 10:58:46 crc kubenswrapper[5016]: E1211 10:58:46.107547 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1e43ec9-f406-4d7b-8928-3e3ae504973d" containerName="init" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.107555 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1e43ec9-f406-4d7b-8928-3e3ae504973d" containerName="init" Dec 11 10:58:46 crc kubenswrapper[5016]: E1211 10:58:46.107564 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1e43ec9-f406-4d7b-8928-3e3ae504973d" containerName="dnsmasq-dns" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.107571 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1e43ec9-f406-4d7b-8928-3e3ae504973d" containerName="dnsmasq-dns" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.107789 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1e43ec9-f406-4d7b-8928-3e3ae504973d" containerName="dnsmasq-dns" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.107808 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="b475a2c8-d3b9-4b61-a58a-a806599c689a" containerName="nova-manage" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.107817 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="83572e5b-69fa-4064-8b2f-cd1653a28754" containerName="nova-scheduler-scheduler" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.108717 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.116319 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.120332 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.256918 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.257024 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-config-data\") pod \"nova-scheduler-0\" (UID: \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.257500 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhrs9\" (UniqueName: \"kubernetes.io/projected/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-kube-api-access-dhrs9\") pod \"nova-scheduler-0\" (UID: \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.359259 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.359356 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-config-data\") pod \"nova-scheduler-0\" (UID: \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.359517 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhrs9\" (UniqueName: \"kubernetes.io/projected/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-kube-api-access-dhrs9\") pod \"nova-scheduler-0\" (UID: \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.364589 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.365250 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-config-data\") pod \"nova-scheduler-0\" (UID: \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.377879 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhrs9\" (UniqueName: 
\"kubernetes.io/projected/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-kube-api-access-dhrs9\") pod \"nova-scheduler-0\" (UID: \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\") " pod="openstack/nova-scheduler-0" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.437027 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 10:58:46 crc kubenswrapper[5016]: I1211 10:58:46.975694 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:58:47 crc kubenswrapper[5016]: I1211 10:58:47.039204 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9","Type":"ContainerStarted","Data":"2dcec8f69e883fa779276fe98254ed3bf5bcbb5a818704223fe0202a9aa4a5e8"} Dec 11 10:58:47 crc kubenswrapper[5016]: I1211 10:58:47.486317 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83572e5b-69fa-4064-8b2f-cd1653a28754" path="/var/lib/kubelet/pods/83572e5b-69fa-4064-8b2f-cd1653a28754/volumes" Dec 11 10:58:47 crc kubenswrapper[5016]: I1211 10:58:47.972721 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.059254 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9","Type":"ContainerStarted","Data":"6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5"} Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.062443 5016 generic.go:334] "Generic (PLEG): container finished" podID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" containerID="8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0" exitCode=0 Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.062483 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.062494 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee62f30c-abf1-4bd8-819f-5ba8d046b028","Type":"ContainerDied","Data":"8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0"} Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.062564 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ee62f30c-abf1-4bd8-819f-5ba8d046b028","Type":"ContainerDied","Data":"9ec169caa6f93a5756da45733284e58896a533a49945bfa789e7e3710fcf3599"} Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.062584 5016 scope.go:117] "RemoveContainer" containerID="8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.080078 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.080050568 podStartE2EDuration="2.080050568s" podCreationTimestamp="2025-12-11 10:58:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:58:48.077460586 +0000 UTC m=+1444.896020165" watchObservedRunningTime="2025-12-11 10:58:48.080050568 +0000 UTC m=+1444.898610147" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.095290 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee62f30c-abf1-4bd8-819f-5ba8d046b028-logs\") pod \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.095554 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee62f30c-abf1-4bd8-819f-5ba8d046b028-config-data\") pod \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.095846 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee62f30c-abf1-4bd8-819f-5ba8d046b028-combined-ca-bundle\") pod \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.095894 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gvzt\" (UniqueName: \"kubernetes.io/projected/ee62f30c-abf1-4bd8-819f-5ba8d046b028-kube-api-access-6gvzt\") pod \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\" (UID: \"ee62f30c-abf1-4bd8-819f-5ba8d046b028\") " Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.096801 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee62f30c-abf1-4bd8-819f-5ba8d046b028-logs" (OuterVolumeSpecName: "logs") pod "ee62f30c-abf1-4bd8-819f-5ba8d046b028" (UID: "ee62f30c-abf1-4bd8-819f-5ba8d046b028"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.101211 5016 scope.go:117] "RemoveContainer" containerID="96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.101360 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee62f30c-abf1-4bd8-819f-5ba8d046b028-kube-api-access-6gvzt" (OuterVolumeSpecName: "kube-api-access-6gvzt") pod "ee62f30c-abf1-4bd8-819f-5ba8d046b028" (UID: "ee62f30c-abf1-4bd8-819f-5ba8d046b028"). InnerVolumeSpecName "kube-api-access-6gvzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.129059 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee62f30c-abf1-4bd8-819f-5ba8d046b028-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee62f30c-abf1-4bd8-819f-5ba8d046b028" (UID: "ee62f30c-abf1-4bd8-819f-5ba8d046b028"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.129662 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee62f30c-abf1-4bd8-819f-5ba8d046b028-config-data" (OuterVolumeSpecName: "config-data") pod "ee62f30c-abf1-4bd8-819f-5ba8d046b028" (UID: "ee62f30c-abf1-4bd8-819f-5ba8d046b028"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.198830 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee62f30c-abf1-4bd8-819f-5ba8d046b028-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.199092 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee62f30c-abf1-4bd8-819f-5ba8d046b028-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.199178 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee62f30c-abf1-4bd8-819f-5ba8d046b028-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.199248 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gvzt\" (UniqueName: \"kubernetes.io/projected/ee62f30c-abf1-4bd8-819f-5ba8d046b028-kube-api-access-6gvzt\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.223669 5016 scope.go:117] "RemoveContainer" containerID="8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0" Dec 11 10:58:48 crc kubenswrapper[5016]: E1211 10:58:48.224278 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0\": container with ID starting with 8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0 not found: ID does not exist" containerID="8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.224325 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0"} err="failed to get container status 
\"8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0\": rpc error: code = NotFound desc = could not find container \"8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0\": container with ID starting with 8cbe249ae3d68abfaf4fdaf73d1fc2fdb8d1f33f40245541c7df401411a2f8d0 not found: ID does not exist" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.224363 5016 scope.go:117] "RemoveContainer" containerID="96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b" Dec 11 10:58:48 crc kubenswrapper[5016]: E1211 10:58:48.224864 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b\": container with ID starting with 96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b not found: ID does not exist" containerID="96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.225013 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b"} err="failed to get container status \"96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b\": rpc error: code = NotFound desc = could not find container \"96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b\": container with ID starting with 96b2cb795fa5d252a29d9ac2ed50d0b37890149a8c59cc96bcbcc7baccfc325b not found: ID does not exist" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.395282 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.410467 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.428077 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 11 10:58:48 crc kubenswrapper[5016]: E1211 10:58:48.428750 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" containerName="nova-api-log" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.428775 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" containerName="nova-api-log" Dec 11 10:58:48 crc kubenswrapper[5016]: E1211 10:58:48.428789 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" containerName="nova-api-api" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.428798 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" containerName="nova-api-api" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.429085 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" containerName="nova-api-api" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.429331 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" containerName="nova-api-log" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.431278 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.437743 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.442958 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.605964 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-logs\") pod \"nova-api-0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") " pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.606026 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-config-data\") pod \"nova-api-0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") " pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.606162 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrfh6\" (UniqueName: \"kubernetes.io/projected/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-kube-api-access-vrfh6\") pod \"nova-api-0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") " pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.606182 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") " pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.708043 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-logs\") pod \"nova-api-0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") " pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.708374 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-config-data\") pod \"nova-api-0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") " pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.708506 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrfh6\" (UniqueName: \"kubernetes.io/projected/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-kube-api-access-vrfh6\") pod \"nova-api-0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") " pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.708535 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") " pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.709650 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-logs\") pod \"nova-api-0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") " 
pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.712633 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") " pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.717998 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-config-data\") pod \"nova-api-0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") " pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.726582 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrfh6\" (UniqueName: \"kubernetes.io/projected/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-kube-api-access-vrfh6\") pod \"nova-api-0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") " pod="openstack/nova-api-0" Dec 11 10:58:48 crc kubenswrapper[5016]: I1211 10:58:48.763714 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:58:49 crc kubenswrapper[5016]: I1211 10:58:49.232129 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:58:49 crc kubenswrapper[5016]: I1211 10:58:49.493929 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee62f30c-abf1-4bd8-819f-5ba8d046b028" path="/var/lib/kubelet/pods/ee62f30c-abf1-4bd8-819f-5ba8d046b028/volumes" Dec 11 10:58:50 crc kubenswrapper[5016]: I1211 10:58:50.086346 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0","Type":"ContainerStarted","Data":"c229de9c004a54211eaf5828d07e49ec0470c22b53b10cc1a36d262b14264d05"} Dec 11 10:58:50 crc kubenswrapper[5016]: I1211 10:58:50.086677 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0","Type":"ContainerStarted","Data":"4f646bb61b0dc907159e72126606847729f4d780851bce75cdcacf86e8debfd2"} Dec 11 10:58:50 crc kubenswrapper[5016]: I1211 10:58:50.086694 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0","Type":"ContainerStarted","Data":"090c50cfd1f53eca65fd596169022d12b7b8551b66007ee6b14b7129dd1598bf"} Dec 11 10:58:50 crc kubenswrapper[5016]: I1211 10:58:50.088563 5016 generic.go:334] "Generic (PLEG): container finished" podID="80b378da-6397-4b63-8eb4-3a2a465e6425" containerID="b38178e5c54cc99963dad2af4beb7cc0015ee856d538a7f49d2b020adb5b4709" exitCode=0 Dec 11 10:58:50 crc kubenswrapper[5016]: I1211 10:58:50.088716 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-d5pmm" event={"ID":"80b378da-6397-4b63-8eb4-3a2a465e6425","Type":"ContainerDied","Data":"b38178e5c54cc99963dad2af4beb7cc0015ee856d538a7f49d2b020adb5b4709"} Dec 11 10:58:50 crc kubenswrapper[5016]: I1211 10:58:50.145648 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.145618078 podStartE2EDuration="2.145618078s" podCreationTimestamp="2025-12-11 10:58:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:58:50.118108215 +0000 UTC 
m=+1446.936667794" watchObservedRunningTime="2025-12-11 10:58:50.145618078 +0000 UTC m=+1446.964177667" Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.437924 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.574092 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.682881 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95rcn\" (UniqueName: \"kubernetes.io/projected/80b378da-6397-4b63-8eb4-3a2a465e6425-kube-api-access-95rcn\") pod \"80b378da-6397-4b63-8eb4-3a2a465e6425\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.684427 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-combined-ca-bundle\") pod \"80b378da-6397-4b63-8eb4-3a2a465e6425\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.684506 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-config-data\") pod \"80b378da-6397-4b63-8eb4-3a2a465e6425\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.684588 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-scripts\") pod \"80b378da-6397-4b63-8eb4-3a2a465e6425\" (UID: \"80b378da-6397-4b63-8eb4-3a2a465e6425\") " Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.747512 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80b378da-6397-4b63-8eb4-3a2a465e6425-kube-api-access-95rcn" (OuterVolumeSpecName: "kube-api-access-95rcn") pod "80b378da-6397-4b63-8eb4-3a2a465e6425" (UID: "80b378da-6397-4b63-8eb4-3a2a465e6425"). InnerVolumeSpecName "kube-api-access-95rcn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.747682 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-scripts" (OuterVolumeSpecName: "scripts") pod "80b378da-6397-4b63-8eb4-3a2a465e6425" (UID: "80b378da-6397-4b63-8eb4-3a2a465e6425"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.787645 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.787686 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95rcn\" (UniqueName: \"kubernetes.io/projected/80b378da-6397-4b63-8eb4-3a2a465e6425-kube-api-access-95rcn\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.802242 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-config-data" (OuterVolumeSpecName: "config-data") pod "80b378da-6397-4b63-8eb4-3a2a465e6425" (UID: "80b378da-6397-4b63-8eb4-3a2a465e6425"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.821010 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "80b378da-6397-4b63-8eb4-3a2a465e6425" (UID: "80b378da-6397-4b63-8eb4-3a2a465e6425"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.889829 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:51 crc kubenswrapper[5016]: I1211 10:58:51.889872 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80b378da-6397-4b63-8eb4-3a2a465e6425-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.114012 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-d5pmm" event={"ID":"80b378da-6397-4b63-8eb4-3a2a465e6425","Type":"ContainerDied","Data":"1fc6eb7d045ac479473a51cd688569b83f8daa105c424edf8ad32c9b873e29fe"} Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.114063 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc6eb7d045ac479473a51cd688569b83f8daa105c424edf8ad32c9b873e29fe" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.114155 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-d5pmm" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.235513 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 11 10:58:52 crc kubenswrapper[5016]: E1211 10:58:52.236172 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80b378da-6397-4b63-8eb4-3a2a465e6425" containerName="nova-cell1-conductor-db-sync" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.236196 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="80b378da-6397-4b63-8eb4-3a2a465e6425" containerName="nova-cell1-conductor-db-sync" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.236459 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="80b378da-6397-4b63-8eb4-3a2a465e6425" containerName="nova-cell1-conductor-db-sync" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.237469 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.241167 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.251515 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.406694 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78drz\" (UniqueName: \"kubernetes.io/projected/9b21ed74-421c-4bbc-b17e-317beee96ae7-kube-api-access-78drz\") pod \"nova-cell1-conductor-0\" (UID: \"9b21ed74-421c-4bbc-b17e-317beee96ae7\") " pod="openstack/nova-cell1-conductor-0" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.406741 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b21ed74-421c-4bbc-b17e-317beee96ae7-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"9b21ed74-421c-4bbc-b17e-317beee96ae7\") " pod="openstack/nova-cell1-conductor-0" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.406783 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b21ed74-421c-4bbc-b17e-317beee96ae7-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"9b21ed74-421c-4bbc-b17e-317beee96ae7\") " pod="openstack/nova-cell1-conductor-0" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.508304 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78drz\" (UniqueName: \"kubernetes.io/projected/9b21ed74-421c-4bbc-b17e-317beee96ae7-kube-api-access-78drz\") pod \"nova-cell1-conductor-0\" (UID: \"9b21ed74-421c-4bbc-b17e-317beee96ae7\") " pod="openstack/nova-cell1-conductor-0" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.508366 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b21ed74-421c-4bbc-b17e-317beee96ae7-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"9b21ed74-421c-4bbc-b17e-317beee96ae7\") " pod="openstack/nova-cell1-conductor-0" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.508451 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/9b21ed74-421c-4bbc-b17e-317beee96ae7-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"9b21ed74-421c-4bbc-b17e-317beee96ae7\") " pod="openstack/nova-cell1-conductor-0" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.517588 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b21ed74-421c-4bbc-b17e-317beee96ae7-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"9b21ed74-421c-4bbc-b17e-317beee96ae7\") " pod="openstack/nova-cell1-conductor-0" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.521106 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b21ed74-421c-4bbc-b17e-317beee96ae7-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"9b21ed74-421c-4bbc-b17e-317beee96ae7\") " pod="openstack/nova-cell1-conductor-0" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.540048 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78drz\" (UniqueName: \"kubernetes.io/projected/9b21ed74-421c-4bbc-b17e-317beee96ae7-kube-api-access-78drz\") pod \"nova-cell1-conductor-0\" (UID: \"9b21ed74-421c-4bbc-b17e-317beee96ae7\") " pod="openstack/nova-cell1-conductor-0" Dec 11 10:58:52 crc kubenswrapper[5016]: I1211 10:58:52.562380 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 11 10:58:53 crc kubenswrapper[5016]: I1211 10:58:53.050832 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 11 10:58:53 crc kubenswrapper[5016]: W1211 10:58:53.056765 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b21ed74_421c_4bbc_b17e_317beee96ae7.slice/crio-e428db14e28e87eb0ca2dc322654a9aa9c44433c84708812d669fab6c00561c6 WatchSource:0}: Error finding container e428db14e28e87eb0ca2dc322654a9aa9c44433c84708812d669fab6c00561c6: Status 404 returned error can't find the container with id e428db14e28e87eb0ca2dc322654a9aa9c44433c84708812d669fab6c00561c6 Dec 11 10:58:53 crc kubenswrapper[5016]: I1211 10:58:53.126026 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"9b21ed74-421c-4bbc-b17e-317beee96ae7","Type":"ContainerStarted","Data":"e428db14e28e87eb0ca2dc322654a9aa9c44433c84708812d669fab6c00561c6"} Dec 11 10:58:54 crc kubenswrapper[5016]: I1211 10:58:54.138184 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"9b21ed74-421c-4bbc-b17e-317beee96ae7","Type":"ContainerStarted","Data":"53fb5bffea25b597de82875b5ea57455e909b02b22f6099e7b865dc7fd951010"} Dec 11 10:58:54 crc kubenswrapper[5016]: I1211 10:58:54.138489 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Dec 11 10:58:54 crc kubenswrapper[5016]: I1211 10:58:54.155760 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.155738701 podStartE2EDuration="2.155738701s" podCreationTimestamp="2025-12-11 10:58:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:58:54.155422272 +0000 UTC m=+1450.973981851" watchObservedRunningTime="2025-12-11 10:58:54.155738701 +0000 UTC m=+1450.974298280" Dec 11 
Dec 11 10:58:56 crc kubenswrapper[5016]: I1211 10:58:56.468932 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Dec 11 10:58:57 crc kubenswrapper[5016]: I1211 10:58:57.199830 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Dec 11 10:58:58 crc kubenswrapper[5016]: I1211 10:58:58.063813 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Dec 11 10:58:58 crc kubenswrapper[5016]: I1211 10:58:58.764290 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 11 10:58:58 crc kubenswrapper[5016]: I1211 10:58:58.764348 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 11 10:58:59 crc kubenswrapper[5016]: I1211 10:58:59.847153 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.189:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:58:59 crc kubenswrapper[5016]: I1211 10:58:59.847153 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.189:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 11 10:59:01 crc kubenswrapper[5016]: I1211 10:59:01.776755 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 11 10:59:01 crc kubenswrapper[5016]: I1211 10:59:01.777444 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="070a458f-4698-4701-903e-a64d4dc7d95a" containerName="kube-state-metrics" containerID="cri-o://53ac27903523f9bbe2bac6028e86dfda9477868b2a3454901c341f8555e89b74" gracePeriod=30
Dec 11 10:59:02 crc kubenswrapper[5016]: I1211 10:59:02.215579 5016 generic.go:334] "Generic (PLEG): container finished" podID="070a458f-4698-4701-903e-a64d4dc7d95a" containerID="53ac27903523f9bbe2bac6028e86dfda9477868b2a3454901c341f8555e89b74" exitCode=2
Dec 11 10:59:02 crc kubenswrapper[5016]: I1211 10:59:02.215623 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"070a458f-4698-4701-903e-a64d4dc7d95a","Type":"ContainerDied","Data":"53ac27903523f9bbe2bac6028e86dfda9477868b2a3454901c341f8555e89b74"}
Dec 11 10:59:02 crc kubenswrapper[5016]: I1211 10:59:02.610399 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Dec 11 10:59:03 crc kubenswrapper[5016]: I1211 10:59:03.365168 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Dec 11 10:59:03 crc kubenswrapper[5016]: I1211 10:59:03.551084 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pjbh\" (UniqueName: \"kubernetes.io/projected/070a458f-4698-4701-903e-a64d4dc7d95a-kube-api-access-4pjbh\") pod \"070a458f-4698-4701-903e-a64d4dc7d95a\" (UID: \"070a458f-4698-4701-903e-a64d4dc7d95a\") "
Dec 11 10:59:03 crc kubenswrapper[5016]: I1211 10:59:03.559413 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/070a458f-4698-4701-903e-a64d4dc7d95a-kube-api-access-4pjbh" (OuterVolumeSpecName: "kube-api-access-4pjbh") pod "070a458f-4698-4701-903e-a64d4dc7d95a" (UID: "070a458f-4698-4701-903e-a64d4dc7d95a"). InnerVolumeSpecName "kube-api-access-4pjbh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 10:59:03 crc kubenswrapper[5016]: I1211 10:59:03.653759 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pjbh\" (UniqueName: \"kubernetes.io/projected/070a458f-4698-4701-903e-a64d4dc7d95a-kube-api-access-4pjbh\") on node \"crc\" DevicePath \"\""
Dec 11 10:59:03 crc kubenswrapper[5016]: I1211 10:59:03.830712 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 10:59:03 crc kubenswrapper[5016]: I1211 10:59:03.831088 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="ceilometer-central-agent" containerID="cri-o://adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f" gracePeriod=30
Dec 11 10:59:03 crc kubenswrapper[5016]: I1211 10:59:03.831201 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="ceilometer-notification-agent" containerID="cri-o://7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610" gracePeriod=30
Dec 11 10:59:03 crc kubenswrapper[5016]: I1211 10:59:03.831212 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="proxy-httpd" containerID="cri-o://24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7" gracePeriod=30
Dec 11 10:59:03 crc kubenswrapper[5016]: I1211 10:59:03.831224 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="sg-core" containerID="cri-o://1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b" gracePeriod=30
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.249586 5016 generic.go:334] "Generic (PLEG): container finished" podID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerID="24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7" exitCode=0
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.249690 5016 generic.go:334] "Generic (PLEG): container finished" podID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerID="1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b" exitCode=2
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.249695 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"387799aa-2f39-4fb3-8d62-225c9e3dcf47","Type":"ContainerDied","Data":"24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7"}
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.249787 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"387799aa-2f39-4fb3-8d62-225c9e3dcf47","Type":"ContainerDied","Data":"1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b"}
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.254133 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.254293 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"070a458f-4698-4701-903e-a64d4dc7d95a","Type":"ContainerDied","Data":"840a15d932c99263407454fa1d2e650027c5974ec646b004fdeb57063524b41a"}
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.254380 5016 scope.go:117] "RemoveContainer" containerID="53ac27903523f9bbe2bac6028e86dfda9477868b2a3454901c341f8555e89b74"
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.345185 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.355517 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.366674 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 11 10:59:04 crc kubenswrapper[5016]: E1211 10:59:04.367212 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="070a458f-4698-4701-903e-a64d4dc7d95a" containerName="kube-state-metrics"
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.367228 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="070a458f-4698-4701-903e-a64d4dc7d95a" containerName="kube-state-metrics"
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.367486 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="070a458f-4698-4701-903e-a64d4dc7d95a" containerName="kube-state-metrics"
Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.368294 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.370602 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.370873 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.383258 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.472495 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/1f3e95ec-e5d3-44ab-ae44-1279b0a04e75-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75\") " pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.472591 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f3e95ec-e5d3-44ab-ae44-1279b0a04e75-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75\") " pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.472667 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rf58\" (UniqueName: \"kubernetes.io/projected/1f3e95ec-e5d3-44ab-ae44-1279b0a04e75-kube-api-access-2rf58\") pod \"kube-state-metrics-0\" (UID: \"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75\") " pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.472717 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/1f3e95ec-e5d3-44ab-ae44-1279b0a04e75-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75\") " pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.575210 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f3e95ec-e5d3-44ab-ae44-1279b0a04e75-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75\") " pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.575639 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rf58\" (UniqueName: \"kubernetes.io/projected/1f3e95ec-e5d3-44ab-ae44-1279b0a04e75-kube-api-access-2rf58\") pod \"kube-state-metrics-0\" (UID: \"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75\") " pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.575725 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/1f3e95ec-e5d3-44ab-ae44-1279b0a04e75-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75\") " pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.575903 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" 
(UniqueName: \"kubernetes.io/secret/1f3e95ec-e5d3-44ab-ae44-1279b0a04e75-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75\") " pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.581932 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/1f3e95ec-e5d3-44ab-ae44-1279b0a04e75-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75\") " pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.582650 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f3e95ec-e5d3-44ab-ae44-1279b0a04e75-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75\") " pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.586525 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/1f3e95ec-e5d3-44ab-ae44-1279b0a04e75-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75\") " pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.600031 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rf58\" (UniqueName: \"kubernetes.io/projected/1f3e95ec-e5d3-44ab-ae44-1279b0a04e75-kube-api-access-2rf58\") pod \"kube-state-metrics-0\" (UID: \"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75\") " pod="openstack/kube-state-metrics-0" Dec 11 10:59:04 crc kubenswrapper[5016]: I1211 10:59:04.692306 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.179780 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 11 10:59:05 crc kubenswrapper[5016]: W1211 10:59:05.189139 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1f3e95ec_e5d3_44ab_ae44_1279b0a04e75.slice/crio-0b94b785428f37082c04abb4b35850a6f0e8859a77e157f70dd3092ca0b17dd1 WatchSource:0}: Error finding container 0b94b785428f37082c04abb4b35850a6f0e8859a77e157f70dd3092ca0b17dd1: Status 404 returned error can't find the container with id 0b94b785428f37082c04abb4b35850a6f0e8859a77e157f70dd3092ca0b17dd1 Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.244690 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.270053 5016 generic.go:334] "Generic (PLEG): container finished" podID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerID="7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610" exitCode=0 Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.270089 5016 generic.go:334] "Generic (PLEG): container finished" podID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerID="adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f" exitCode=0 Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.270150 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"387799aa-2f39-4fb3-8d62-225c9e3dcf47","Type":"ContainerDied","Data":"7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610"} Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.270182 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"387799aa-2f39-4fb3-8d62-225c9e3dcf47","Type":"ContainerDied","Data":"adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f"} Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.270185 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.270207 5016 scope.go:117] "RemoveContainer" containerID="24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.270193 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"387799aa-2f39-4fb3-8d62-225c9e3dcf47","Type":"ContainerDied","Data":"d2b22cd4d32343643c71e4c54cdff91036ac3a6d5eb35f3577ca38e79c27aaf3"} Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.272239 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75","Type":"ContainerStarted","Data":"0b94b785428f37082c04abb4b35850a6f0e8859a77e157f70dd3092ca0b17dd1"} Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.342190 5016 scope.go:117] "RemoveContainer" containerID="1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.371106 5016 scope.go:117] "RemoveContainer" containerID="7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.394666 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/387799aa-2f39-4fb3-8d62-225c9e3dcf47-run-httpd\") pod \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.394764 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/387799aa-2f39-4fb3-8d62-225c9e3dcf47-log-httpd\") pod \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.394803 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-sg-core-conf-yaml\") pod \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " Dec 11 10:59:05 crc 
kubenswrapper[5016]: I1211 10:59:05.394830 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7g9mv\" (UniqueName: \"kubernetes.io/projected/387799aa-2f39-4fb3-8d62-225c9e3dcf47-kube-api-access-7g9mv\") pod \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.394888 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-combined-ca-bundle\") pod \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.394923 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-scripts\") pod \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.395024 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-config-data\") pod \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\" (UID: \"387799aa-2f39-4fb3-8d62-225c9e3dcf47\") " Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.395375 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/387799aa-2f39-4fb3-8d62-225c9e3dcf47-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "387799aa-2f39-4fb3-8d62-225c9e3dcf47" (UID: "387799aa-2f39-4fb3-8d62-225c9e3dcf47"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.396220 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/387799aa-2f39-4fb3-8d62-225c9e3dcf47-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "387799aa-2f39-4fb3-8d62-225c9e3dcf47" (UID: "387799aa-2f39-4fb3-8d62-225c9e3dcf47"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.402296 5016 scope.go:117] "RemoveContainer" containerID="adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.420020 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-scripts" (OuterVolumeSpecName: "scripts") pod "387799aa-2f39-4fb3-8d62-225c9e3dcf47" (UID: "387799aa-2f39-4fb3-8d62-225c9e3dcf47"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.420431 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/387799aa-2f39-4fb3-8d62-225c9e3dcf47-kube-api-access-7g9mv" (OuterVolumeSpecName: "kube-api-access-7g9mv") pod "387799aa-2f39-4fb3-8d62-225c9e3dcf47" (UID: "387799aa-2f39-4fb3-8d62-225c9e3dcf47"). InnerVolumeSpecName "kube-api-access-7g9mv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.431114 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "387799aa-2f39-4fb3-8d62-225c9e3dcf47" (UID: "387799aa-2f39-4fb3-8d62-225c9e3dcf47"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.491935 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="070a458f-4698-4701-903e-a64d4dc7d95a" path="/var/lib/kubelet/pods/070a458f-4698-4701-903e-a64d4dc7d95a/volumes" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.497433 5016 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/387799aa-2f39-4fb3-8d62-225c9e3dcf47-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.497470 5016 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/387799aa-2f39-4fb3-8d62-225c9e3dcf47-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.497481 5016 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.497493 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7g9mv\" (UniqueName: \"kubernetes.io/projected/387799aa-2f39-4fb3-8d62-225c9e3dcf47-kube-api-access-7g9mv\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.497502 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.509359 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-config-data" (OuterVolumeSpecName: "config-data") pod "387799aa-2f39-4fb3-8d62-225c9e3dcf47" (UID: "387799aa-2f39-4fb3-8d62-225c9e3dcf47"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.510674 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "387799aa-2f39-4fb3-8d62-225c9e3dcf47" (UID: "387799aa-2f39-4fb3-8d62-225c9e3dcf47"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.600752 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.600794 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/387799aa-2f39-4fb3-8d62-225c9e3dcf47-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.635618 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.661418 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.678614 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:59:05 crc kubenswrapper[5016]: E1211 10:59:05.679180 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="sg-core" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.679199 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="sg-core" Dec 11 10:59:05 crc kubenswrapper[5016]: E1211 10:59:05.679222 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="ceilometer-notification-agent" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.679230 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="ceilometer-notification-agent" Dec 11 10:59:05 crc kubenswrapper[5016]: E1211 10:59:05.679241 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="ceilometer-central-agent" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.679247 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="ceilometer-central-agent" Dec 11 10:59:05 crc kubenswrapper[5016]: E1211 10:59:05.679269 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="proxy-httpd" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.679274 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="proxy-httpd" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.681315 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="sg-core" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.681344 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="ceilometer-notification-agent" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.681368 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="ceilometer-central-agent" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.681383 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" containerName="proxy-httpd" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.684434 5016 scope.go:117] 
"RemoveContainer" containerID="24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.685071 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: E1211 10:59:05.686283 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7\": container with ID starting with 24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7 not found: ID does not exist" containerID="24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.686319 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7"} err="failed to get container status \"24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7\": rpc error: code = NotFound desc = could not find container \"24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7\": container with ID starting with 24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7 not found: ID does not exist" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.686356 5016 scope.go:117] "RemoveContainer" containerID="1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b" Dec 11 10:59:05 crc kubenswrapper[5016]: E1211 10:59:05.689175 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b\": container with ID starting with 1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b not found: ID does not exist" containerID="1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.689216 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b"} err="failed to get container status \"1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b\": rpc error: code = NotFound desc = could not find container \"1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b\": container with ID starting with 1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b not found: ID does not exist" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.689239 5016 scope.go:117] "RemoveContainer" containerID="7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610" Dec 11 10:59:05 crc kubenswrapper[5016]: E1211 10:59:05.689844 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610\": container with ID starting with 7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610 not found: ID does not exist" containerID="7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.689867 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610"} err="failed to get container status \"7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610\": rpc error: 
code = NotFound desc = could not find container \"7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610\": container with ID starting with 7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610 not found: ID does not exist" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.689886 5016 scope.go:117] "RemoveContainer" containerID="adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f" Dec 11 10:59:05 crc kubenswrapper[5016]: E1211 10:59:05.690176 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f\": container with ID starting with adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f not found: ID does not exist" containerID="adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.690201 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f"} err="failed to get container status \"adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f\": rpc error: code = NotFound desc = could not find container \"adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f\": container with ID starting with adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f not found: ID does not exist" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.690217 5016 scope.go:117] "RemoveContainer" containerID="24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.690478 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7"} err="failed to get container status \"24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7\": rpc error: code = NotFound desc = could not find container \"24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7\": container with ID starting with 24c800406dd71b0fe02bdea4f2b8507ea2a0aba0ba2e52cbb08efa1e0adf10d7 not found: ID does not exist" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.690501 5016 scope.go:117] "RemoveContainer" containerID="1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.691496 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b"} err="failed to get container status \"1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b\": rpc error: code = NotFound desc = could not find container \"1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b\": container with ID starting with 1e00973e53694a13169972fd098ff25737f51130ac5e0df717a22b3d50e51b8b not found: ID does not exist" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.691525 5016 scope.go:117] "RemoveContainer" containerID="7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.692567 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610"} err="failed to get container status \"7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610\": rpc error: 
code = NotFound desc = could not find container \"7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610\": container with ID starting with 7e3b1bebf96d898b220a628627c1298c87aab1d3bd320267906924bb88d0c610 not found: ID does not exist" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.692589 5016 scope.go:117] "RemoveContainer" containerID="adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.693112 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f"} err="failed to get container status \"adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f\": rpc error: code = NotFound desc = could not find container \"adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f\": container with ID starting with adb6819c6a86b85315099acab74dbe7a0a43e0c63c58751e9430d2af900c0f4f not found: ID does not exist" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.695072 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.695367 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.695361 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.695447 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.805689 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.805764 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-config-data\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.806096 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cfc02da-1137-4a3d-bea6-40a80cbf2037-log-httpd\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.806193 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cfc02da-1137-4a3d-bea6-40a80cbf2037-run-httpd\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.806342 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc 
kubenswrapper[5016]: I1211 10:59:05.806567 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.806675 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5kt4\" (UniqueName: \"kubernetes.io/projected/1cfc02da-1137-4a3d-bea6-40a80cbf2037-kube-api-access-z5kt4\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.807062 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-scripts\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.910039 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.910128 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.910151 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5kt4\" (UniqueName: \"kubernetes.io/projected/1cfc02da-1137-4a3d-bea6-40a80cbf2037-kube-api-access-z5kt4\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.910216 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-scripts\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.910287 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.910315 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-config-data\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.910341 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cfc02da-1137-4a3d-bea6-40a80cbf2037-log-httpd\") pod \"ceilometer-0\" (UID: 
\"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.910364 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cfc02da-1137-4a3d-bea6-40a80cbf2037-run-httpd\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.910839 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cfc02da-1137-4a3d-bea6-40a80cbf2037-run-httpd\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.914568 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cfc02da-1137-4a3d-bea6-40a80cbf2037-log-httpd\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.915588 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-config-data\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.917746 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.918124 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.918254 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.918386 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-scripts\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:05 crc kubenswrapper[5016]: I1211 10:59:05.936830 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5kt4\" (UniqueName: \"kubernetes.io/projected/1cfc02da-1137-4a3d-bea6-40a80cbf2037-kube-api-access-z5kt4\") pod \"ceilometer-0\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " pod="openstack/ceilometer-0" Dec 11 10:59:06 crc kubenswrapper[5016]: I1211 10:59:06.010090 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:59:06 crc kubenswrapper[5016]: I1211 10:59:06.291290 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1f3e95ec-e5d3-44ab-ae44-1279b0a04e75","Type":"ContainerStarted","Data":"e5543b89d2dd0bcef88a6fcacd8cba16f9f0ec7e2adc624026e02e41631192cf"} Dec 11 10:59:06 crc kubenswrapper[5016]: I1211 10:59:06.291868 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 11 10:59:06 crc kubenswrapper[5016]: I1211 10:59:06.314294 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.936997591 podStartE2EDuration="2.314265078s" podCreationTimestamp="2025-12-11 10:59:04 +0000 UTC" firstStartedPulling="2025-12-11 10:59:05.191891114 +0000 UTC m=+1462.010450703" lastFinishedPulling="2025-12-11 10:59:05.569158611 +0000 UTC m=+1462.387718190" observedRunningTime="2025-12-11 10:59:06.308808837 +0000 UTC m=+1463.127368426" watchObservedRunningTime="2025-12-11 10:59:06.314265078 +0000 UTC m=+1463.132824657" Dec 11 10:59:06 crc kubenswrapper[5016]: W1211 10:59:06.578804 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1cfc02da_1137_4a3d_bea6_40a80cbf2037.slice/crio-8d6f00c84273ca1a2c2c0cdc404331e2896ca8148fe96cf1f17ad6073e7d5264 WatchSource:0}: Error finding container 8d6f00c84273ca1a2c2c0cdc404331e2896ca8148fe96cf1f17ad6073e7d5264: Status 404 returned error can't find the container with id 8d6f00c84273ca1a2c2c0cdc404331e2896ca8148fe96cf1f17ad6073e7d5264 Dec 11 10:59:06 crc kubenswrapper[5016]: I1211 10:59:06.581313 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.301019 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cfc02da-1137-4a3d-bea6-40a80cbf2037","Type":"ContainerStarted","Data":"8d6f00c84273ca1a2c2c0cdc404331e2896ca8148fe96cf1f17ad6073e7d5264"} Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.303554 5016 generic.go:334] "Generic (PLEG): container finished" podID="13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" containerID="8101fffa9dc16ab88014bd0f7a4f041d13bb050825e8529dfd12a4d80a357a5c" exitCode=137 Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.303626 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27","Type":"ContainerDied","Data":"8101fffa9dc16ab88014bd0f7a4f041d13bb050825e8529dfd12a4d80a357a5c"} Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.306724 5016 generic.go:334] "Generic (PLEG): container finished" podID="97a17b46-eb85-4f0f-a6ea-9db9871f48df" containerID="c80f9b2b2b6fbafeb311b1508cda8a626cdeb1473cef03e42f3c3343617d0bab" exitCode=137 Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.306778 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"97a17b46-eb85-4f0f-a6ea-9db9871f48df","Type":"ContainerDied","Data":"c80f9b2b2b6fbafeb311b1508cda8a626cdeb1473cef03e42f3c3343617d0bab"} Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.488595 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="387799aa-2f39-4fb3-8d62-225c9e3dcf47" path="/var/lib/kubelet/pods/387799aa-2f39-4fb3-8d62-225c9e3dcf47/volumes" Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 
10:59:07.817633 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.828056 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.949854 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-config-data\") pod \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.950015 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nnf5w\" (UniqueName: \"kubernetes.io/projected/97a17b46-eb85-4f0f-a6ea-9db9871f48df-kube-api-access-nnf5w\") pod \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\" (UID: \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\") " Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.950055 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqzch\" (UniqueName: \"kubernetes.io/projected/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-kube-api-access-kqzch\") pod \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.950088 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a17b46-eb85-4f0f-a6ea-9db9871f48df-config-data\") pod \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\" (UID: \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\") " Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.950210 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a17b46-eb85-4f0f-a6ea-9db9871f48df-combined-ca-bundle\") pod \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\" (UID: \"97a17b46-eb85-4f0f-a6ea-9db9871f48df\") " Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.950247 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-logs\") pod \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.950285 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-combined-ca-bundle\") pod \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\" (UID: \"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27\") " Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.955058 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-logs" (OuterVolumeSpecName: "logs") pod "13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" (UID: "13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.959415 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-kube-api-access-kqzch" (OuterVolumeSpecName: "kube-api-access-kqzch") pod "13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" (UID: "13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27"). InnerVolumeSpecName "kube-api-access-kqzch". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.959566 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97a17b46-eb85-4f0f-a6ea-9db9871f48df-kube-api-access-nnf5w" (OuterVolumeSpecName: "kube-api-access-nnf5w") pod "97a17b46-eb85-4f0f-a6ea-9db9871f48df" (UID: "97a17b46-eb85-4f0f-a6ea-9db9871f48df"). InnerVolumeSpecName "kube-api-access-nnf5w". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.985578 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-config-data" (OuterVolumeSpecName: "config-data") pod "13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" (UID: "13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.985629 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" (UID: "13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.991120 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a17b46-eb85-4f0f-a6ea-9db9871f48df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97a17b46-eb85-4f0f-a6ea-9db9871f48df" (UID: "97a17b46-eb85-4f0f-a6ea-9db9871f48df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:07 crc kubenswrapper[5016]: I1211 10:59:07.992792 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a17b46-eb85-4f0f-a6ea-9db9871f48df-config-data" (OuterVolumeSpecName: "config-data") pod "97a17b46-eb85-4f0f-a6ea-9db9871f48df" (UID: "97a17b46-eb85-4f0f-a6ea-9db9871f48df"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.054437 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.054615 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nnf5w\" (UniqueName: \"kubernetes.io/projected/97a17b46-eb85-4f0f-a6ea-9db9871f48df-kube-api-access-nnf5w\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.054847 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqzch\" (UniqueName: \"kubernetes.io/projected/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-kube-api-access-kqzch\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.055973 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a17b46-eb85-4f0f-a6ea-9db9871f48df-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.056053 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a17b46-eb85-4f0f-a6ea-9db9871f48df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.056134 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.056201 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.328368 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cfc02da-1137-4a3d-bea6-40a80cbf2037","Type":"ContainerStarted","Data":"44e44849b2763e2f20ac5e148a9f79befef1a11e834ee2ed9879d9d7d4c10e74"} Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.328725 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cfc02da-1137-4a3d-bea6-40a80cbf2037","Type":"ContainerStarted","Data":"d14b9111a972dc5b09e116bd69ea02e2636fe3de07b9d09dbc6f3a90594b0f8f"} Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.331525 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27","Type":"ContainerDied","Data":"56444b31db778d77b7241563d3108c4ae322ce7b1d7371f1dc685128e73be880"} Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.331571 5016 scope.go:117] "RemoveContainer" containerID="8101fffa9dc16ab88014bd0f7a4f041d13bb050825e8529dfd12a4d80a357a5c" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.331577 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.336186 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"97a17b46-eb85-4f0f-a6ea-9db9871f48df","Type":"ContainerDied","Data":"98d2ecf8613fe31b2b5ebbba30fa514c1e93c2ddfa8a5c71c4a3e198689e1b79"} Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.336282 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.389140 5016 scope.go:117] "RemoveContainer" containerID="9c9604449c8ccf4d833a48163613ecce9140a69b7581c202587de7e1985abbdb" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.407329 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.422498 5016 scope.go:117] "RemoveContainer" containerID="c80f9b2b2b6fbafeb311b1508cda8a626cdeb1473cef03e42f3c3343617d0bab" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.435026 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.446081 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.455148 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.463295 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:59:08 crc kubenswrapper[5016]: E1211 10:59:08.463825 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" containerName="nova-metadata-log" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.463849 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" containerName="nova-metadata-log" Dec 11 10:59:08 crc kubenswrapper[5016]: E1211 10:59:08.463881 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" containerName="nova-metadata-metadata" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.463888 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" containerName="nova-metadata-metadata" Dec 11 10:59:08 crc kubenswrapper[5016]: E1211 10:59:08.463913 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97a17b46-eb85-4f0f-a6ea-9db9871f48df" containerName="nova-cell1-novncproxy-novncproxy" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.463920 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="97a17b46-eb85-4f0f-a6ea-9db9871f48df" containerName="nova-cell1-novncproxy-novncproxy" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.464140 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" containerName="nova-metadata-metadata" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.464161 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" containerName="nova-metadata-log" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.464170 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="97a17b46-eb85-4f0f-a6ea-9db9871f48df" containerName="nova-cell1-novncproxy-novncproxy" 
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.465226 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.467110 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.467832 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.472588 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.474044 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.475967 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.480167 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.480615 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.485422 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.526149 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.570811 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.571033 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.571130 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.571155 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4ggj\" (UniqueName: \"kubernetes.io/projected/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-kube-api-access-c4ggj\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.571183 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.571236 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-config-data\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.571286 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.571375 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.571449 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77f7fa0a-5732-4160-82e7-09b358f28403-logs\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.571517 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggnd7\" (UniqueName: \"kubernetes.io/projected/77f7fa0a-5732-4160-82e7-09b358f28403-kube-api-access-ggnd7\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.673757 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.673821 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77f7fa0a-5732-4160-82e7-09b358f28403-logs\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.673885 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggnd7\" (UniqueName: \"kubernetes.io/projected/77f7fa0a-5732-4160-82e7-09b358f28403-kube-api-access-ggnd7\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.673964 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: 
\"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.674016 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.674046 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.674065 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4ggj\" (UniqueName: \"kubernetes.io/projected/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-kube-api-access-c4ggj\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.674082 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.674102 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-config-data\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.674119 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.676530 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77f7fa0a-5732-4160-82e7-09b358f28403-logs\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.679666 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.687206 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.687674 5016 operation_generator.go:637] "MountVolume.SetUp 
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.687674 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0"
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.687859 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0"
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.689276 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.689988 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.691053 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-config-data\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0"
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.696463 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggnd7\" (UniqueName: \"kubernetes.io/projected/77f7fa0a-5732-4160-82e7-09b358f28403-kube-api-access-ggnd7\") pod \"nova-metadata-0\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " pod="openstack/nova-metadata-0"
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.699137 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4ggj\" (UniqueName: \"kubernetes.io/projected/c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d-kube-api-access-c4ggj\") pod \"nova-cell1-novncproxy-0\" (UID: \"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.769386 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.769451 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.770883 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.770906 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.773334 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.773369 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
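
[editor note] The records above show the kubelet's three-step volume path for the two new pods: operationExecutor.VerifyControllerAttachedVolume (reconciler_common.go:245), operationExecutor.MountVolume started (reconciler_common.go:218), and MountVolume.SetUp succeeded (operation_generator.go:637). Each volume is keyed by a UniqueName of the form plugin/podUID-volumeName. A minimal client-go sketch for reproducing those keys from the pod spec follows; the kubeconfig path is a hypothetical placeholder, while the namespace and pod name come from the log.

// volkeys.go - sketch: print the UniqueName-style key for each volume of a pod,
// mirroring the plugin/podUID-volumeName keys in the log records above.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // hypothetical path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pod, err := cs.CoreV1().Pods("openstack").Get(context.TODO(), "nova-metadata-0", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, v := range pod.Spec.Volumes {
		// The plugin prefixes match what the log shows: secret, projected, empty-dir.
		switch {
		case v.Secret != nil:
			fmt.Printf("kubernetes.io/secret/%s-%s\n", pod.UID, v.Name)
		case v.Projected != nil:
			fmt.Printf("kubernetes.io/projected/%s-%s\n", pod.UID, v.Name)
		case v.EmptyDir != nil:
			fmt.Printf("kubernetes.io/empty-dir/%s-%s\n", pod.UID, v.Name)
		default:
			fmt.Printf("(other plugin) %s\n", v.Name)
		}
	}
}
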
Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 10:59:08 crc kubenswrapper[5016]: I1211 10:59:08.808425 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.006667 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-424j8"] Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.012604 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.022631 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-424j8"] Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.086320 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.086369 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.086394 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.086457 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.086494 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-config\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.086587 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dw8lm\" (UniqueName: \"kubernetes.io/projected/44f55261-c4b7-43a0-ad5d-7b84e6338f33-kube-api-access-dw8lm\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.195582 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " 
pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.195631 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.195661 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.195721 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.195753 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-config\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.195854 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dw8lm\" (UniqueName: \"kubernetes.io/projected/44f55261-c4b7-43a0-ad5d-7b84e6338f33-kube-api-access-dw8lm\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.196687 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.198329 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.213070 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.214453 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.214691 5016 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-config\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.233653 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dw8lm\" (UniqueName: \"kubernetes.io/projected/44f55261-c4b7-43a0-ad5d-7b84e6338f33-kube-api-access-dw8lm\") pod \"dnsmasq-dns-cd5cbd7b9-424j8\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.351520 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cfc02da-1137-4a3d-bea6-40a80cbf2037","Type":"ContainerStarted","Data":"782f4192cd86d30082ae56a22a2c8e4201aeb7c61b9848fe8439b6f6cdb72286"} Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.449740 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.487562 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27" path="/var/lib/kubelet/pods/13cb40f0-60c1-4bcd-9aa1-1b91bcacdf27/volumes" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.488276 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97a17b46-eb85-4f0f-a6ea-9db9871f48df" path="/var/lib/kubelet/pods/97a17b46-eb85-4f0f-a6ea-9db9871f48df/volumes" Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.553643 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 11 10:59:09 crc kubenswrapper[5016]: I1211 10:59:09.576307 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:59:10 crc kubenswrapper[5016]: I1211 10:59:10.050064 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-424j8"] Dec 11 10:59:10 crc kubenswrapper[5016]: W1211 10:59:10.051395 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44f55261_c4b7_43a0_ad5d_7b84e6338f33.slice/crio-adb08b5199271d39e4620c653ef0f9d7aaebe281f53cf8b6edad1c6ce6e30ccb WatchSource:0}: Error finding container adb08b5199271d39e4620c653ef0f9d7aaebe281f53cf8b6edad1c6ce6e30ccb: Status 404 returned error can't find the container with id adb08b5199271d39e4620c653ef0f9d7aaebe281f53cf8b6edad1c6ce6e30ccb Dec 11 10:59:10 crc kubenswrapper[5016]: I1211 10:59:10.367696 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" event={"ID":"44f55261-c4b7-43a0-ad5d-7b84e6338f33","Type":"ContainerStarted","Data":"548415aa97b85dbd8dd6c510c36083641f8dff81adf0c1581267dc4679d27281"} Dec 11 10:59:10 crc kubenswrapper[5016]: I1211 10:59:10.368215 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" event={"ID":"44f55261-c4b7-43a0-ad5d-7b84e6338f33","Type":"ContainerStarted","Data":"adb08b5199271d39e4620c653ef0f9d7aaebe281f53cf8b6edad1c6ce6e30ccb"} Dec 11 10:59:10 crc kubenswrapper[5016]: I1211 10:59:10.382811 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d","Type":"ContainerStarted","Data":"d12a3435dfbde7945d5eb8bcff7745d777564f68417d784ad31da4a33d7830ed"} Dec 11 10:59:10 crc kubenswrapper[5016]: I1211 10:59:10.382873 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d","Type":"ContainerStarted","Data":"7218fe6a68e98dbb605b2006e49b5aa0ee13a54eac59f8a10058e9bd1275b72e"} Dec 11 10:59:10 crc kubenswrapper[5016]: I1211 10:59:10.395392 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"77f7fa0a-5732-4160-82e7-09b358f28403","Type":"ContainerStarted","Data":"27743bda5dfb69f5a4a21201197e2774ef32201f52e12c9c3d2039ca7624fa7d"} Dec 11 10:59:10 crc kubenswrapper[5016]: I1211 10:59:10.395452 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"77f7fa0a-5732-4160-82e7-09b358f28403","Type":"ContainerStarted","Data":"a7e1bbc610e23616d90e2a7f69b97ba6b034a90e96aff1bd058bbd6ddb184a88"} Dec 11 10:59:10 crc kubenswrapper[5016]: I1211 10:59:10.395465 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"77f7fa0a-5732-4160-82e7-09b358f28403","Type":"ContainerStarted","Data":"618525b4f7f1471341327faec930dfc425f7f7d151a69a49d257a578c070cc5e"} Dec 11 10:59:10 crc kubenswrapper[5016]: I1211 10:59:10.449408 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.449372735 podStartE2EDuration="2.449372735s" podCreationTimestamp="2025-12-11 10:59:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:59:10.418951171 +0000 UTC m=+1467.237510770" watchObservedRunningTime="2025-12-11 10:59:10.449372735 +0000 UTC m=+1467.267932334" Dec 11 10:59:10 crc kubenswrapper[5016]: I1211 10:59:10.479651 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.479627994 podStartE2EDuration="2.479627994s" podCreationTimestamp="2025-12-11 10:59:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:59:10.442150551 +0000 UTC m=+1467.260710140" watchObservedRunningTime="2025-12-11 10:59:10.479627994 +0000 UTC m=+1467.298187573" Dec 11 10:59:11 crc kubenswrapper[5016]: I1211 10:59:11.410106 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cfc02da-1137-4a3d-bea6-40a80cbf2037","Type":"ContainerStarted","Data":"f1421fad3a72f42b46810be25dd63f5f583fb2aff0959d479197cc221a0c9afc"} Dec 11 10:59:11 crc kubenswrapper[5016]: I1211 10:59:11.412482 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" event={"ID":"44f55261-c4b7-43a0-ad5d-7b84e6338f33","Type":"ContainerDied","Data":"548415aa97b85dbd8dd6c510c36083641f8dff81adf0c1581267dc4679d27281"} Dec 11 10:59:11 crc kubenswrapper[5016]: I1211 10:59:11.412679 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 10:59:11 crc kubenswrapper[5016]: I1211 10:59:11.412187 5016 generic.go:334] "Generic (PLEG): container finished" podID="44f55261-c4b7-43a0-ad5d-7b84e6338f33" containerID="548415aa97b85dbd8dd6c510c36083641f8dff81adf0c1581267dc4679d27281" exitCode=0 Dec 11 10:59:11 crc 
Dec 11 10:59:11 crc kubenswrapper[5016]: I1211 10:59:11.443898 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.934379347 podStartE2EDuration="6.443866946s" podCreationTimestamp="2025-12-11 10:59:05 +0000 UTC" firstStartedPulling="2025-12-11 10:59:06.581179405 +0000 UTC m=+1463.399738984" lastFinishedPulling="2025-12-11 10:59:10.090667004 +0000 UTC m=+1466.909226583" observedRunningTime="2025-12-11 10:59:11.434284545 +0000 UTC m=+1468.252844124" watchObservedRunningTime="2025-12-11 10:59:11.443866946 +0000 UTC m=+1468.262426535"
Dec 11 10:59:11 crc kubenswrapper[5016]: I1211 10:59:11.644521 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Dec 11 10:59:11 crc kubenswrapper[5016]: I1211 10:59:11.645141 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" containerName="nova-api-log" containerID="cri-o://4f646bb61b0dc907159e72126606847729f4d780851bce75cdcacf86e8debfd2" gracePeriod=30
Dec 11 10:59:11 crc kubenswrapper[5016]: I1211 10:59:11.645526 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" containerName="nova-api-api" containerID="cri-o://c229de9c004a54211eaf5828d07e49ec0470c22b53b10cc1a36d262b14264d05" gracePeriod=30
Dec 11 10:59:12 crc kubenswrapper[5016]: I1211 10:59:12.435700 5016 generic.go:334] "Generic (PLEG): container finished" podID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" containerID="4f646bb61b0dc907159e72126606847729f4d780851bce75cdcacf86e8debfd2" exitCode=143
Dec 11 10:59:12 crc kubenswrapper[5016]: I1211 10:59:12.437229 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0","Type":"ContainerDied","Data":"4f646bb61b0dc907159e72126606847729f4d780851bce75cdcacf86e8debfd2"}
Dec 11 10:59:12 crc kubenswrapper[5016]: I1211 10:59:12.440636 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" event={"ID":"44f55261-c4b7-43a0-ad5d-7b84e6338f33","Type":"ContainerStarted","Data":"b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc"}
Dec 11 10:59:12 crc kubenswrapper[5016]: I1211 10:59:12.471112 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" podStartSLOduration=4.471083607 podStartE2EDuration="4.471083607s" podCreationTimestamp="2025-12-11 10:59:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:59:12.469815967 +0000 UTC m=+1469.288375546" watchObservedRunningTime="2025-12-11 10:59:12.471083607 +0000 UTC m=+1469.289643186"
Dec 11 10:59:12 crc kubenswrapper[5016]: I1211 10:59:12.933501 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 10:59:12 crc kubenswrapper[5016]: I1211 10:59:12.933821 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 10:59:13 crc kubenswrapper[5016]: I1211 10:59:13.094057 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 11 10:59:13 crc kubenswrapper[5016]: I1211 10:59:13.453416 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8"
Dec 11 10:59:13 crc kubenswrapper[5016]: I1211 10:59:13.794360 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 11 10:59:13 crc kubenswrapper[5016]: I1211 10:59:13.794456 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 11 10:59:13 crc kubenswrapper[5016]: I1211 10:59:13.809026 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Dec 11 10:59:14 crc kubenswrapper[5016]: I1211 10:59:14.461791 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="ceilometer-central-agent" containerID="cri-o://d14b9111a972dc5b09e116bd69ea02e2636fe3de07b9d09dbc6f3a90594b0f8f" gracePeriod=30
Dec 11 10:59:14 crc kubenswrapper[5016]: I1211 10:59:14.461860 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="proxy-httpd" containerID="cri-o://f1421fad3a72f42b46810be25dd63f5f583fb2aff0959d479197cc221a0c9afc" gracePeriod=30
Dec 11 10:59:14 crc kubenswrapper[5016]: I1211 10:59:14.461909 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="sg-core" containerID="cri-o://782f4192cd86d30082ae56a22a2c8e4201aeb7c61b9848fe8439b6f6cdb72286" gracePeriod=30
Dec 11 10:59:14 crc kubenswrapper[5016]: I1211 10:59:14.462007 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="ceilometer-notification-agent" containerID="cri-o://44e44849b2763e2f20ac5e148a9f79befef1a11e834ee2ed9879d9d7d4c10e74" gracePeriod=30
Dec 11 10:59:14 crc kubenswrapper[5016]: I1211 10:59:14.701494 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.486392 5016 generic.go:334] "Generic (PLEG): container finished" podID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" containerID="c229de9c004a54211eaf5828d07e49ec0470c22b53b10cc1a36d262b14264d05" exitCode=0
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.489119 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0","Type":"ContainerDied","Data":"c229de9c004a54211eaf5828d07e49ec0470c22b53b10cc1a36d262b14264d05"}
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.491269 5016 generic.go:334] "Generic (PLEG): container finished" podID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerID="f1421fad3a72f42b46810be25dd63f5f583fb2aff0959d479197cc221a0c9afc" exitCode=0
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.491296 5016 generic.go:334] "Generic (PLEG): container finished" podID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerID="782f4192cd86d30082ae56a22a2c8e4201aeb7c61b9848fe8439b6f6cdb72286" exitCode=2
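
[editor note] The exit codes in the container-finished records follow the usual shell convention: 0 is a clean exit, the 2 for sg-core is likely its own error status, and the 143 logged for nova-api-log at 10:59:12 is 128+15, i.e. termination by SIGTERM within the 30-second grace period requested at 10:59:11. A tiny decoder for that convention (sketch, Unix-only):

// exitsig.go - sketch: decode the >128 exit-code convention (code = 128 + signal number).
package main

import (
	"fmt"
	"syscall"
)

func main() {
	for _, code := range []int{0, 2, 143} {
		if code > 128 {
			sig := syscall.Signal(code - 128)
			fmt.Printf("exit %d => killed by signal %d (%s)\n", code, int(sig), sig)
			continue
		}
		fmt.Printf("exit %d => process's own status\n", code)
	}
}

Also visible above: the liveness probe for machine-config-daemon-2x7t7 fails with connection refused on 127.0.0.1:8798, i.e. nothing was listening on the health port at that moment; the kubelet only restarts the container once the failure threshold is reached.
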
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.491304 5016 generic.go:334] "Generic (PLEG): container finished" podID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerID="44e44849b2763e2f20ac5e148a9f79befef1a11e834ee2ed9879d9d7d4c10e74" exitCode=0
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.491311 5016 generic.go:334] "Generic (PLEG): container finished" podID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerID="d14b9111a972dc5b09e116bd69ea02e2636fe3de07b9d09dbc6f3a90594b0f8f" exitCode=0
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.491340 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cfc02da-1137-4a3d-bea6-40a80cbf2037","Type":"ContainerDied","Data":"f1421fad3a72f42b46810be25dd63f5f583fb2aff0959d479197cc221a0c9afc"}
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.491376 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cfc02da-1137-4a3d-bea6-40a80cbf2037","Type":"ContainerDied","Data":"782f4192cd86d30082ae56a22a2c8e4201aeb7c61b9848fe8439b6f6cdb72286"}
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.491388 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cfc02da-1137-4a3d-bea6-40a80cbf2037","Type":"ContainerDied","Data":"44e44849b2763e2f20ac5e148a9f79befef1a11e834ee2ed9879d9d7d4c10e74"}
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.491397 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cfc02da-1137-4a3d-bea6-40a80cbf2037","Type":"ContainerDied","Data":"d14b9111a972dc5b09e116bd69ea02e2636fe3de07b9d09dbc6f3a90594b0f8f"}
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.517837 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.663714 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrfh6\" (UniqueName: \"kubernetes.io/projected/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-kube-api-access-vrfh6\") pod \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") "
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.664432 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-combined-ca-bundle\") pod \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") "
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.664548 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-config-data\") pod \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") "
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.664713 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-logs\") pod \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\" (UID: \"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0\") "
Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.666051 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-logs" (OuterVolumeSpecName: "logs") pod "5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" (UID: "5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.688411 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-kube-api-access-vrfh6" (OuterVolumeSpecName: "kube-api-access-vrfh6") pod "5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" (UID: "5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0"). InnerVolumeSpecName "kube-api-access-vrfh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.731529 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-config-data" (OuterVolumeSpecName: "config-data") pod "5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" (UID: "5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.731642 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" (UID: "5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.768082 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.768127 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrfh6\" (UniqueName: \"kubernetes.io/projected/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-kube-api-access-vrfh6\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.768146 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.768159 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.833306 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.972156 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cfc02da-1137-4a3d-bea6-40a80cbf2037-run-httpd\") pod \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.972322 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-combined-ca-bundle\") pod \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.972464 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cfc02da-1137-4a3d-bea6-40a80cbf2037-log-httpd\") pod \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.972528 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cfc02da-1137-4a3d-bea6-40a80cbf2037-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1cfc02da-1137-4a3d-bea6-40a80cbf2037" (UID: "1cfc02da-1137-4a3d-bea6-40a80cbf2037"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.972552 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-ceilometer-tls-certs\") pod \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.972617 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-scripts\") pod \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.972675 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5kt4\" (UniqueName: \"kubernetes.io/projected/1cfc02da-1137-4a3d-bea6-40a80cbf2037-kube-api-access-z5kt4\") pod \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.972715 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-config-data\") pod \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.972809 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-sg-core-conf-yaml\") pod \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\" (UID: \"1cfc02da-1137-4a3d-bea6-40a80cbf2037\") " Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.973250 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cfc02da-1137-4a3d-bea6-40a80cbf2037-log-httpd" (OuterVolumeSpecName: "log-httpd") pod 
"1cfc02da-1137-4a3d-bea6-40a80cbf2037" (UID: "1cfc02da-1137-4a3d-bea6-40a80cbf2037"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.982059 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-scripts" (OuterVolumeSpecName: "scripts") pod "1cfc02da-1137-4a3d-bea6-40a80cbf2037" (UID: "1cfc02da-1137-4a3d-bea6-40a80cbf2037"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.987686 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cfc02da-1137-4a3d-bea6-40a80cbf2037-kube-api-access-z5kt4" (OuterVolumeSpecName: "kube-api-access-z5kt4") pod "1cfc02da-1137-4a3d-bea6-40a80cbf2037" (UID: "1cfc02da-1137-4a3d-bea6-40a80cbf2037"). InnerVolumeSpecName "kube-api-access-z5kt4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.988172 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5kt4\" (UniqueName: \"kubernetes.io/projected/1cfc02da-1137-4a3d-bea6-40a80cbf2037-kube-api-access-z5kt4\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.988229 5016 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cfc02da-1137-4a3d-bea6-40a80cbf2037-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.988241 5016 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cfc02da-1137-4a3d-bea6-40a80cbf2037-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:15 crc kubenswrapper[5016]: I1211 10:59:15.988254 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.016563 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1cfc02da-1137-4a3d-bea6-40a80cbf2037" (UID: "1cfc02da-1137-4a3d-bea6-40a80cbf2037"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.041304 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "1cfc02da-1137-4a3d-bea6-40a80cbf2037" (UID: "1cfc02da-1137-4a3d-bea6-40a80cbf2037"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.090210 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1cfc02da-1137-4a3d-bea6-40a80cbf2037" (UID: "1cfc02da-1137-4a3d-bea6-40a80cbf2037"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.090621 5016 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.090654 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.090667 5016 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.101201 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-config-data" (OuterVolumeSpecName: "config-data") pod "1cfc02da-1137-4a3d-bea6-40a80cbf2037" (UID: "1cfc02da-1137-4a3d-bea6-40a80cbf2037"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.192505 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cfc02da-1137-4a3d-bea6-40a80cbf2037-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.504839 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0","Type":"ContainerDied","Data":"090c50cfd1f53eca65fd596169022d12b7b8551b66007ee6b14b7129dd1598bf"} Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.504909 5016 scope.go:117] "RemoveContainer" containerID="c229de9c004a54211eaf5828d07e49ec0470c22b53b10cc1a36d262b14264d05" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.505076 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.509953 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cfc02da-1137-4a3d-bea6-40a80cbf2037","Type":"ContainerDied","Data":"8d6f00c84273ca1a2c2c0cdc404331e2896ca8148fe96cf1f17ad6073e7d5264"} Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.510077 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.541244 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.543358 5016 scope.go:117] "RemoveContainer" containerID="4f646bb61b0dc907159e72126606847729f4d780851bce75cdcacf86e8debfd2" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.553257 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.570263 5016 scope.go:117] "RemoveContainer" containerID="f1421fad3a72f42b46810be25dd63f5f583fb2aff0959d479197cc221a0c9afc" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.577898 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.587877 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 11 10:59:16 crc kubenswrapper[5016]: E1211 10:59:16.588396 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="ceilometer-central-agent" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.588419 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="ceilometer-central-agent" Dec 11 10:59:16 crc kubenswrapper[5016]: E1211 10:59:16.588449 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="proxy-httpd" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.588456 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="proxy-httpd" Dec 11 10:59:16 crc kubenswrapper[5016]: E1211 10:59:16.588467 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" containerName="nova-api-api" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.588477 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" containerName="nova-api-api" Dec 11 10:59:16 crc kubenswrapper[5016]: E1211 10:59:16.588499 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="sg-core" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.588505 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="sg-core" Dec 11 10:59:16 crc kubenswrapper[5016]: E1211 10:59:16.588525 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" containerName="nova-api-log" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.588531 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" containerName="nova-api-log" Dec 11 10:59:16 crc kubenswrapper[5016]: E1211 10:59:16.588540 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="ceilometer-notification-agent" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.588545 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="ceilometer-notification-agent" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.588744 5016 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" containerName="nova-api-log" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.588770 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" containerName="nova-api-api" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.588794 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="ceilometer-notification-agent" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.588811 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="ceilometer-central-agent" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.588824 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="sg-core" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.588835 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" containerName="proxy-httpd" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.593963 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.598316 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.600132 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.600305 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.600147 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.625012 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.649713 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.659699 5016 util.go:30] "No sandbox for pod can be found. 
Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.659699 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.675755 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.676353 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.676900 5016 scope.go:117] "RemoveContainer" containerID="782f4192cd86d30082ae56a22a2c8e4201aeb7c61b9848fe8439b6f6cdb72286"
Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.678203 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.711322 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0"
Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.711722 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac8ef276-d90a-4d13-b8b1-fc452060abfb-logs\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0"
Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.712038 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-public-tls-certs\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0"
Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.712269 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0"
Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.712344 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0"
Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.712485 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-scripts\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0"
Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.712540 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0"
\"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-config-data\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.712641 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mczmp\" (UniqueName: \"kubernetes.io/projected/ac8ef276-d90a-4d13-b8b1-fc452060abfb-kube-api-access-mczmp\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.712862 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd3155c0-9091-4e5e-888d-67b0256b0b51-run-httpd\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.713040 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzmr6\" (UniqueName: \"kubernetes.io/projected/fd3155c0-9091-4e5e-888d-67b0256b0b51-kube-api-access-gzmr6\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.713118 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd3155c0-9091-4e5e-888d-67b0256b0b51-log-httpd\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.713324 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.713428 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-config-data\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.715216 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.718272 5016 scope.go:117] "RemoveContainer" containerID="44e44849b2763e2f20ac5e148a9f79befef1a11e834ee2ed9879d9d7d4c10e74" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.744587 5016 scope.go:117] "RemoveContainer" containerID="d14b9111a972dc5b09e116bd69ea02e2636fe3de07b9d09dbc6f3a90594b0f8f" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815006 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd3155c0-9091-4e5e-888d-67b0256b0b51-run-httpd\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815076 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzmr6\" (UniqueName: \"kubernetes.io/projected/fd3155c0-9091-4e5e-888d-67b0256b0b51-kube-api-access-gzmr6\") pod 
\"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815108 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd3155c0-9091-4e5e-888d-67b0256b0b51-log-httpd\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815147 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815168 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-config-data\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815245 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815274 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac8ef276-d90a-4d13-b8b1-fc452060abfb-logs\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815299 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-public-tls-certs\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815322 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815343 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815380 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-scripts\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815403 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-combined-ca-bundle\") pod \"ceilometer-0\" (UID: 
\"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815428 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-config-data\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.815453 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mczmp\" (UniqueName: \"kubernetes.io/projected/ac8ef276-d90a-4d13-b8b1-fc452060abfb-kube-api-access-mczmp\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.816434 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd3155c0-9091-4e5e-888d-67b0256b0b51-run-httpd\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.816926 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac8ef276-d90a-4d13-b8b1-fc452060abfb-logs\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.817846 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fd3155c0-9091-4e5e-888d-67b0256b0b51-log-httpd\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.821813 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.823032 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.823120 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.823807 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.824569 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-config-data\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 
10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.825178 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-config-data\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.827741 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd3155c0-9091-4e5e-888d-67b0256b0b51-scripts\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.827829 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.831567 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-public-tls-certs\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.834907 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzmr6\" (UniqueName: \"kubernetes.io/projected/fd3155c0-9091-4e5e-888d-67b0256b0b51-kube-api-access-gzmr6\") pod \"ceilometer-0\" (UID: \"fd3155c0-9091-4e5e-888d-67b0256b0b51\") " pod="openstack/ceilometer-0" Dec 11 10:59:16 crc kubenswrapper[5016]: I1211 10:59:16.840594 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mczmp\" (UniqueName: \"kubernetes.io/projected/ac8ef276-d90a-4d13-b8b1-fc452060abfb-kube-api-access-mczmp\") pod \"nova-api-0\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " pod="openstack/nova-api-0" Dec 11 10:59:17 crc kubenswrapper[5016]: I1211 10:59:17.020410 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:59:17 crc kubenswrapper[5016]: I1211 10:59:17.035612 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 11 10:59:17 crc kubenswrapper[5016]: I1211 10:59:17.487210 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cfc02da-1137-4a3d-bea6-40a80cbf2037" path="/var/lib/kubelet/pods/1cfc02da-1137-4a3d-bea6-40a80cbf2037/volumes" Dec 11 10:59:17 crc kubenswrapper[5016]: I1211 10:59:17.488419 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0" path="/var/lib/kubelet/pods/5d8a8c3c-1cc2-43b5-8ba3-ada9be55acc0/volumes" Dec 11 10:59:17 crc kubenswrapper[5016]: I1211 10:59:17.560262 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:59:17 crc kubenswrapper[5016]: W1211 10:59:17.562433 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac8ef276_d90a_4d13_b8b1_fc452060abfb.slice/crio-40f89a2c181cf223e4cd349d8acedcc1880653d86b93698f279bedf1292edac5 WatchSource:0}: Error finding container 40f89a2c181cf223e4cd349d8acedcc1880653d86b93698f279bedf1292edac5: Status 404 returned error can't find the container with id 40f89a2c181cf223e4cd349d8acedcc1880653d86b93698f279bedf1292edac5 Dec 11 10:59:17 crc kubenswrapper[5016]: W1211 10:59:17.605498 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd3155c0_9091_4e5e_888d_67b0256b0b51.slice/crio-c6fcdf61c4c1cfecf265c8dea1b6f6a2d3655db2f319cc6dbd889d6a67a1f65b WatchSource:0}: Error finding container c6fcdf61c4c1cfecf265c8dea1b6f6a2d3655db2f319cc6dbd889d6a67a1f65b: Status 404 returned error can't find the container with id c6fcdf61c4c1cfecf265c8dea1b6f6a2d3655db2f319cc6dbd889d6a67a1f65b Dec 11 10:59:17 crc kubenswrapper[5016]: I1211 10:59:17.608058 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 11 10:59:18 crc kubenswrapper[5016]: I1211 10:59:18.539929 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd3155c0-9091-4e5e-888d-67b0256b0b51","Type":"ContainerStarted","Data":"9e2015f16d3b29004968679ae41af82f5f1c8550dd38a2aa30e14a10d83c3ec9"} Dec 11 10:59:18 crc kubenswrapper[5016]: I1211 10:59:18.540296 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd3155c0-9091-4e5e-888d-67b0256b0b51","Type":"ContainerStarted","Data":"c6fcdf61c4c1cfecf265c8dea1b6f6a2d3655db2f319cc6dbd889d6a67a1f65b"} Dec 11 10:59:18 crc kubenswrapper[5016]: I1211 10:59:18.545013 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ac8ef276-d90a-4d13-b8b1-fc452060abfb","Type":"ContainerStarted","Data":"95b4aa0b677b92252cbc54928a587c4cb31e1e9694c0503f84f3e117d2b34080"} Dec 11 10:59:18 crc kubenswrapper[5016]: I1211 10:59:18.545058 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ac8ef276-d90a-4d13-b8b1-fc452060abfb","Type":"ContainerStarted","Data":"af91ba04309a4f7ea6376546512934134eecb3fe819c90e3737c5eb17bc53fd4"} Dec 11 10:59:18 crc kubenswrapper[5016]: I1211 10:59:18.545071 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ac8ef276-d90a-4d13-b8b1-fc452060abfb","Type":"ContainerStarted","Data":"40f89a2c181cf223e4cd349d8acedcc1880653d86b93698f279bedf1292edac5"} Dec 11 10:59:18 crc kubenswrapper[5016]: I1211 10:59:18.578235 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-api-0" podStartSLOduration=2.578199376 podStartE2EDuration="2.578199376s" podCreationTimestamp="2025-12-11 10:59:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:59:18.569034806 +0000 UTC m=+1475.387594415" watchObservedRunningTime="2025-12-11 10:59:18.578199376 +0000 UTC m=+1475.396758975" Dec 11 10:59:18 crc kubenswrapper[5016]: I1211 10:59:18.787792 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 11 10:59:18 crc kubenswrapper[5016]: I1211 10:59:18.787833 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 11 10:59:18 crc kubenswrapper[5016]: I1211 10:59:18.809547 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:18 crc kubenswrapper[5016]: I1211 10:59:18.829153 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.451266 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.543427 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-5p6nv"] Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.543718 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" podUID="8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" containerName="dnsmasq-dns" containerID="cri-o://715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b" gracePeriod=10 Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.592810 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.801239 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.801299 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.834583 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-sxslc"] Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.836649 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.842808 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-sxslc"] Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.892858 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.893154 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.907264 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-sxslc\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.907380 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-scripts\") pod \"nova-cell1-cell-mapping-sxslc\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.907413 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-config-data\") pod \"nova-cell1-cell-mapping-sxslc\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:19 crc kubenswrapper[5016]: I1211 10:59:19.907513 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hh6bs\" (UniqueName: \"kubernetes.io/projected/64fce467-b180-44c5-9d9c-e62505e87282-kube-api-access-hh6bs\") pod \"nova-cell1-cell-mapping-sxslc\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:20 crc kubenswrapper[5016]: I1211 10:59:20.017755 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-scripts\") pod \"nova-cell1-cell-mapping-sxslc\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:20 crc kubenswrapper[5016]: I1211 10:59:20.017838 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-config-data\") pod \"nova-cell1-cell-mapping-sxslc\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:20 crc kubenswrapper[5016]: I1211 10:59:20.018212 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hh6bs\" (UniqueName: \"kubernetes.io/projected/64fce467-b180-44c5-9d9c-e62505e87282-kube-api-access-hh6bs\") pod \"nova-cell1-cell-mapping-sxslc\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:20 crc kubenswrapper[5016]: I1211 10:59:20.018584 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-sxslc\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:20 crc kubenswrapper[5016]: I1211 10:59:20.028206 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-scripts\") pod \"nova-cell1-cell-mapping-sxslc\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:20 crc kubenswrapper[5016]: I1211 10:59:20.029507 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-config-data\") pod \"nova-cell1-cell-mapping-sxslc\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:20 crc kubenswrapper[5016]: I1211 10:59:20.030220 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-sxslc\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:20 crc kubenswrapper[5016]: I1211 10:59:20.047066 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hh6bs\" (UniqueName: \"kubernetes.io/projected/64fce467-b180-44c5-9d9c-e62505e87282-kube-api-access-hh6bs\") pod \"nova-cell1-cell-mapping-sxslc\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:20 crc kubenswrapper[5016]: I1211 10:59:20.211783 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:20 crc kubenswrapper[5016]: I1211 10:59:20.465143 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" podUID="8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.186:5353: connect: connection refused" Dec 11 10:59:20 crc kubenswrapper[5016]: I1211 10:59:20.823418 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-sxslc"] Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.221575 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.259898 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-ovsdbserver-sb\") pod \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.260245 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-dns-svc\") pod \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.260314 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-ovsdbserver-nb\") pod \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.260355 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5l9q\" (UniqueName: \"kubernetes.io/projected/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-kube-api-access-z5l9q\") pod \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.260539 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-dns-swift-storage-0\") pod \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.260669 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-config\") pod \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\" (UID: \"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3\") " Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.266295 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-kube-api-access-z5l9q" (OuterVolumeSpecName: "kube-api-access-z5l9q") pod "8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" (UID: "8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3"). InnerVolumeSpecName "kube-api-access-z5l9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.370590 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5l9q\" (UniqueName: \"kubernetes.io/projected/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-kube-api-access-z5l9q\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.370697 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" (UID: "8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.393226 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" (UID: "8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.397452 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" (UID: "8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.400535 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" (UID: "8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.412547 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-config" (OuterVolumeSpecName: "config") pod "8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" (UID: "8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.472552 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-config\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.472627 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.472640 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.472650 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.472661 5016 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.596479 5016 generic.go:334] "Generic (PLEG): container finished" podID="8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" containerID="715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b" exitCode=0 Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.596571 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" 
event={"ID":"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3","Type":"ContainerDied","Data":"715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b"} Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.596585 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.596654 5016 scope.go:117] "RemoveContainer" containerID="715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.596637 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-5p6nv" event={"ID":"8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3","Type":"ContainerDied","Data":"364a07a91b998989a8cfb841a021971dcad2c28535860046e059f1325c41d79e"} Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.605178 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd3155c0-9091-4e5e-888d-67b0256b0b51","Type":"ContainerStarted","Data":"a1d4de193f3c68cc337eb7bc4aad8beabf8c351fa044bb58442e30b3f4514067"} Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.610435 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-sxslc" event={"ID":"64fce467-b180-44c5-9d9c-e62505e87282","Type":"ContainerStarted","Data":"1c5233bd6e4d1368be33d9b819d30c7941f2fee59bbf3d9358cd1a05376cb9d1"} Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.610497 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-sxslc" event={"ID":"64fce467-b180-44c5-9d9c-e62505e87282","Type":"ContainerStarted","Data":"0689734f1b2ce4bcb581150ed1df0e826474fbceda1937430ca3fa7d4c54d442"} Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.626423 5016 scope.go:117] "RemoveContainer" containerID="2e22ea3e2379155e21a3130edd82887a257bfd53b1cd64da60a01764d1f430bd" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.637468 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-5p6nv"] Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.650601 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-5p6nv"] Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.651480 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-sxslc" podStartSLOduration=2.651453476 podStartE2EDuration="2.651453476s" podCreationTimestamp="2025-12-11 10:59:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:59:21.646318123 +0000 UTC m=+1478.464877702" watchObservedRunningTime="2025-12-11 10:59:21.651453476 +0000 UTC m=+1478.470013055" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.663091 5016 scope.go:117] "RemoveContainer" containerID="715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b" Dec 11 10:59:21 crc kubenswrapper[5016]: E1211 10:59:21.663852 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b\": container with ID starting with 715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b not found: ID does not exist" containerID="715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.663910 5016 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b"} err="failed to get container status \"715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b\": rpc error: code = NotFound desc = could not find container \"715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b\": container with ID starting with 715e280e984a5cd5b74f6d754732c3c649d4a3b4a893acb0c67f833947f8735b not found: ID does not exist" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.663962 5016 scope.go:117] "RemoveContainer" containerID="2e22ea3e2379155e21a3130edd82887a257bfd53b1cd64da60a01764d1f430bd" Dec 11 10:59:21 crc kubenswrapper[5016]: E1211 10:59:21.664655 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e22ea3e2379155e21a3130edd82887a257bfd53b1cd64da60a01764d1f430bd\": container with ID starting with 2e22ea3e2379155e21a3130edd82887a257bfd53b1cd64da60a01764d1f430bd not found: ID does not exist" containerID="2e22ea3e2379155e21a3130edd82887a257bfd53b1cd64da60a01764d1f430bd" Dec 11 10:59:21 crc kubenswrapper[5016]: I1211 10:59:21.664705 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e22ea3e2379155e21a3130edd82887a257bfd53b1cd64da60a01764d1f430bd"} err="failed to get container status \"2e22ea3e2379155e21a3130edd82887a257bfd53b1cd64da60a01764d1f430bd\": rpc error: code = NotFound desc = could not find container \"2e22ea3e2379155e21a3130edd82887a257bfd53b1cd64da60a01764d1f430bd\": container with ID starting with 2e22ea3e2379155e21a3130edd82887a257bfd53b1cd64da60a01764d1f430bd not found: ID does not exist" Dec 11 10:59:22 crc kubenswrapper[5016]: I1211 10:59:22.638872 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd3155c0-9091-4e5e-888d-67b0256b0b51","Type":"ContainerStarted","Data":"9703d354d0087b5cf98a24394b7ff40dd5a280cd3e1afc64fafd931818fabefd"} Dec 11 10:59:23 crc kubenswrapper[5016]: I1211 10:59:23.487103 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" path="/var/lib/kubelet/pods/8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3/volumes" Dec 11 10:59:23 crc kubenswrapper[5016]: I1211 10:59:23.655332 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fd3155c0-9091-4e5e-888d-67b0256b0b51","Type":"ContainerStarted","Data":"14431344ac07da8bdaadf2897a799555bfc1ce258180688972cfd6d658170fa1"} Dec 11 10:59:23 crc kubenswrapper[5016]: I1211 10:59:23.657195 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 11 10:59:23 crc kubenswrapper[5016]: I1211 10:59:23.682074 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.145301627 podStartE2EDuration="7.682052463s" podCreationTimestamp="2025-12-11 10:59:16 +0000 UTC" firstStartedPulling="2025-12-11 10:59:17.608872522 +0000 UTC m=+1474.427432101" lastFinishedPulling="2025-12-11 10:59:23.145623368 +0000 UTC m=+1479.964182937" observedRunningTime="2025-12-11 10:59:23.6781823 +0000 UTC m=+1480.496741879" watchObservedRunningTime="2025-12-11 10:59:23.682052463 +0000 UTC m=+1480.500612042" Dec 11 10:59:27 crc kubenswrapper[5016]: I1211 10:59:27.021037 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 11 
10:59:27 crc kubenswrapper[5016]: I1211 10:59:27.021352 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 11 10:59:27 crc kubenswrapper[5016]: I1211 10:59:27.701154 5016 generic.go:334] "Generic (PLEG): container finished" podID="64fce467-b180-44c5-9d9c-e62505e87282" containerID="1c5233bd6e4d1368be33d9b819d30c7941f2fee59bbf3d9358cd1a05376cb9d1" exitCode=0 Dec 11 10:59:27 crc kubenswrapper[5016]: I1211 10:59:27.701199 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-sxslc" event={"ID":"64fce467-b180-44c5-9d9c-e62505e87282","Type":"ContainerDied","Data":"1c5233bd6e4d1368be33d9b819d30c7941f2fee59bbf3d9358cd1a05376cb9d1"} Dec 11 10:59:28 crc kubenswrapper[5016]: I1211 10:59:28.033077 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.196:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 10:59:28 crc kubenswrapper[5016]: I1211 10:59:28.033098 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.196:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 10:59:28 crc kubenswrapper[5016]: I1211 10:59:28.793259 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 11 10:59:28 crc kubenswrapper[5016]: I1211 10:59:28.793990 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 11 10:59:28 crc kubenswrapper[5016]: I1211 10:59:28.815449 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.119432 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.238746 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-combined-ca-bundle\") pod \"64fce467-b180-44c5-9d9c-e62505e87282\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.238858 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-scripts\") pod \"64fce467-b180-44c5-9d9c-e62505e87282\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.238918 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-config-data\") pod \"64fce467-b180-44c5-9d9c-e62505e87282\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.239051 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hh6bs\" (UniqueName: \"kubernetes.io/projected/64fce467-b180-44c5-9d9c-e62505e87282-kube-api-access-hh6bs\") pod \"64fce467-b180-44c5-9d9c-e62505e87282\" (UID: \"64fce467-b180-44c5-9d9c-e62505e87282\") " Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.246647 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-scripts" (OuterVolumeSpecName: "scripts") pod "64fce467-b180-44c5-9d9c-e62505e87282" (UID: "64fce467-b180-44c5-9d9c-e62505e87282"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.247354 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64fce467-b180-44c5-9d9c-e62505e87282-kube-api-access-hh6bs" (OuterVolumeSpecName: "kube-api-access-hh6bs") pod "64fce467-b180-44c5-9d9c-e62505e87282" (UID: "64fce467-b180-44c5-9d9c-e62505e87282"). InnerVolumeSpecName "kube-api-access-hh6bs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.272476 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "64fce467-b180-44c5-9d9c-e62505e87282" (UID: "64fce467-b180-44c5-9d9c-e62505e87282"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.276251 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-config-data" (OuterVolumeSpecName: "config-data") pod "64fce467-b180-44c5-9d9c-e62505e87282" (UID: "64fce467-b180-44c5-9d9c-e62505e87282"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.341721 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.341757 5016 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-scripts\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.341769 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64fce467-b180-44c5-9d9c-e62505e87282-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.341778 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hh6bs\" (UniqueName: \"kubernetes.io/projected/64fce467-b180-44c5-9d9c-e62505e87282-kube-api-access-hh6bs\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.721266 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-sxslc" Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.721236 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-sxslc" event={"ID":"64fce467-b180-44c5-9d9c-e62505e87282","Type":"ContainerDied","Data":"0689734f1b2ce4bcb581150ed1df0e826474fbceda1937430ca3fa7d4c54d442"} Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.721348 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0689734f1b2ce4bcb581150ed1df0e826474fbceda1937430ca3fa7d4c54d442" Dec 11 10:59:29 crc kubenswrapper[5016]: I1211 10:59:29.772799 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 11 10:59:30 crc kubenswrapper[5016]: I1211 10:59:30.001365 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:59:30 crc kubenswrapper[5016]: I1211 10:59:30.001968 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" containerName="nova-api-log" containerID="cri-o://af91ba04309a4f7ea6376546512934134eecb3fe819c90e3737c5eb17bc53fd4" gracePeriod=30 Dec 11 10:59:30 crc kubenswrapper[5016]: I1211 10:59:30.002146 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" containerName="nova-api-api" containerID="cri-o://95b4aa0b677b92252cbc54928a587c4cb31e1e9694c0503f84f3e117d2b34080" gracePeriod=30 Dec 11 10:59:30 crc kubenswrapper[5016]: I1211 10:59:30.028693 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:59:30 crc kubenswrapper[5016]: I1211 10:59:30.037345 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:59:30 crc kubenswrapper[5016]: I1211 10:59:30.037594 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="166e24ea-2c2f-4503-a5de-4f0ceb9f31c9" containerName="nova-scheduler-scheduler" containerID="cri-o://6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5" gracePeriod=30 Dec 11 10:59:30 crc kubenswrapper[5016]: I1211 
10:59:30.733547 5016 generic.go:334] "Generic (PLEG): container finished" podID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" containerID="af91ba04309a4f7ea6376546512934134eecb3fe819c90e3737c5eb17bc53fd4" exitCode=143 Dec 11 10:59:30 crc kubenswrapper[5016]: I1211 10:59:30.734205 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ac8ef276-d90a-4d13-b8b1-fc452060abfb","Type":"ContainerDied","Data":"af91ba04309a4f7ea6376546512934134eecb3fe819c90e3737c5eb17bc53fd4"} Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.405782 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hlz49"] Dec 11 10:59:31 crc kubenswrapper[5016]: E1211 10:59:31.406698 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" containerName="init" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.406719 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" containerName="init" Dec 11 10:59:31 crc kubenswrapper[5016]: E1211 10:59:31.406737 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64fce467-b180-44c5-9d9c-e62505e87282" containerName="nova-manage" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.406745 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="64fce467-b180-44c5-9d9c-e62505e87282" containerName="nova-manage" Dec 11 10:59:31 crc kubenswrapper[5016]: E1211 10:59:31.406754 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" containerName="dnsmasq-dns" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.406762 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" containerName="dnsmasq-dns" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.406998 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="64fce467-b180-44c5-9d9c-e62505e87282" containerName="nova-manage" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.407016 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fa379d5-a5d0-4d0c-b39c-41fb46fa53e3" containerName="dnsmasq-dns" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.408393 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:31 crc kubenswrapper[5016]: E1211 10:59:31.438819 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5 is running failed: container process not found" containerID="6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 10:59:31 crc kubenswrapper[5016]: E1211 10:59:31.439601 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5 is running failed: container process not found" containerID="6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 10:59:31 crc kubenswrapper[5016]: E1211 10:59:31.440234 5016 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5 is running failed: container process not found" containerID="6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 11 10:59:31 crc kubenswrapper[5016]: E1211 10:59:31.440276 5016 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="166e24ea-2c2f-4503-a5de-4f0ceb9f31c9" containerName="nova-scheduler-scheduler" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.448798 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hlz49"] Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.516543 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.599570 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-utilities\") pod \"redhat-operators-hlz49\" (UID: \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\") " pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.599646 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-catalog-content\") pod \"redhat-operators-hlz49\" (UID: \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\") " pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.599686 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2szzr\" (UniqueName: \"kubernetes.io/projected/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-kube-api-access-2szzr\") pod \"redhat-operators-hlz49\" (UID: \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\") " pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.701802 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-combined-ca-bundle\") pod \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\" (UID: \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\") " Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.702008 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhrs9\" (UniqueName: \"kubernetes.io/projected/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-kube-api-access-dhrs9\") pod \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\" (UID: \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\") " Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.702126 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-config-data\") pod \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\" (UID: \"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9\") " Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.702656 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-utilities\") pod \"redhat-operators-hlz49\" (UID: \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\") " pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.703222 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-utilities\") pod \"redhat-operators-hlz49\" (UID: \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\") " pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.703312 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-catalog-content\") pod \"redhat-operators-hlz49\" (UID: \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\") " pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 
10:59:31.703705 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2szzr\" (UniqueName: \"kubernetes.io/projected/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-kube-api-access-2szzr\") pod \"redhat-operators-hlz49\" (UID: \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\") " pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.703568 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-catalog-content\") pod \"redhat-operators-hlz49\" (UID: \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\") " pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.713339 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-kube-api-access-dhrs9" (OuterVolumeSpecName: "kube-api-access-dhrs9") pod "166e24ea-2c2f-4503-a5de-4f0ceb9f31c9" (UID: "166e24ea-2c2f-4503-a5de-4f0ceb9f31c9"). InnerVolumeSpecName "kube-api-access-dhrs9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.726026 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2szzr\" (UniqueName: \"kubernetes.io/projected/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-kube-api-access-2szzr\") pod \"redhat-operators-hlz49\" (UID: \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\") " pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.736191 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-config-data" (OuterVolumeSpecName: "config-data") pod "166e24ea-2c2f-4503-a5de-4f0ceb9f31c9" (UID: "166e24ea-2c2f-4503-a5de-4f0ceb9f31c9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.751953 5016 generic.go:334] "Generic (PLEG): container finished" podID="166e24ea-2c2f-4503-a5de-4f0ceb9f31c9" containerID="6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5" exitCode=0 Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.752200 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" containerName="nova-metadata-log" containerID="cri-o://a7e1bbc610e23616d90e2a7f69b97ba6b034a90e96aff1bd058bbd6ddb184a88" gracePeriod=30 Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.752299 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.752792 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9","Type":"ContainerDied","Data":"6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5"} Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.752822 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"166e24ea-2c2f-4503-a5de-4f0ceb9f31c9","Type":"ContainerDied","Data":"2dcec8f69e883fa779276fe98254ed3bf5bcbb5a818704223fe0202a9aa4a5e8"} Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.752843 5016 scope.go:117] "RemoveContainer" containerID="6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.753389 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" containerName="nova-metadata-metadata" containerID="cri-o://27743bda5dfb69f5a4a21201197e2774ef32201f52e12c9c3d2039ca7624fa7d" gracePeriod=30 Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.771513 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "166e24ea-2c2f-4503-a5de-4f0ceb9f31c9" (UID: "166e24ea-2c2f-4503-a5de-4f0ceb9f31c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.806184 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.806438 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhrs9\" (UniqueName: \"kubernetes.io/projected/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-kube-api-access-dhrs9\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.806563 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.831659 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.848800 5016 scope.go:117] "RemoveContainer" containerID="6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5" Dec 11 10:59:31 crc kubenswrapper[5016]: E1211 10:59:31.849379 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5\": container with ID starting with 6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5 not found: ID does not exist" containerID="6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5" Dec 11 10:59:31 crc kubenswrapper[5016]: I1211 10:59:31.849425 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5"} err="failed to get container status \"6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5\": rpc error: code = NotFound desc = could not find container \"6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5\": container with ID starting with 6ac12536e73a532a8ce23f9b167a45dd963d696afb045c6c2e9a8d1298ed4df5 not found: ID does not exist" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.184649 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.220158 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.241465 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:59:32 crc kubenswrapper[5016]: E1211 10:59:32.242245 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="166e24ea-2c2f-4503-a5de-4f0ceb9f31c9" containerName="nova-scheduler-scheduler" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.242277 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="166e24ea-2c2f-4503-a5de-4f0ceb9f31c9" containerName="nova-scheduler-scheduler" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.242589 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="166e24ea-2c2f-4503-a5de-4f0ceb9f31c9" containerName="nova-scheduler-scheduler" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.243605 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.251763 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.253609 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.423292 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd7590f4-6fdc-450e-8a96-e4ca6315d644-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cd7590f4-6fdc-450e-8a96-e4ca6315d644\") " pod="openstack/nova-scheduler-0" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.423380 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd7590f4-6fdc-450e-8a96-e4ca6315d644-config-data\") pod \"nova-scheduler-0\" (UID: \"cd7590f4-6fdc-450e-8a96-e4ca6315d644\") " pod="openstack/nova-scheduler-0" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.423618 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qk87l\" (UniqueName: \"kubernetes.io/projected/cd7590f4-6fdc-450e-8a96-e4ca6315d644-kube-api-access-qk87l\") pod \"nova-scheduler-0\" (UID: \"cd7590f4-6fdc-450e-8a96-e4ca6315d644\") " pod="openstack/nova-scheduler-0" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.527131 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qk87l\" (UniqueName: \"kubernetes.io/projected/cd7590f4-6fdc-450e-8a96-e4ca6315d644-kube-api-access-qk87l\") pod \"nova-scheduler-0\" (UID: \"cd7590f4-6fdc-450e-8a96-e4ca6315d644\") " pod="openstack/nova-scheduler-0" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.528856 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd7590f4-6fdc-450e-8a96-e4ca6315d644-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cd7590f4-6fdc-450e-8a96-e4ca6315d644\") " pod="openstack/nova-scheduler-0" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.530536 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd7590f4-6fdc-450e-8a96-e4ca6315d644-config-data\") pod \"nova-scheduler-0\" (UID: \"cd7590f4-6fdc-450e-8a96-e4ca6315d644\") " pod="openstack/nova-scheduler-0" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.542016 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd7590f4-6fdc-450e-8a96-e4ca6315d644-config-data\") pod \"nova-scheduler-0\" (UID: \"cd7590f4-6fdc-450e-8a96-e4ca6315d644\") " pod="openstack/nova-scheduler-0" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.559912 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qk87l\" (UniqueName: \"kubernetes.io/projected/cd7590f4-6fdc-450e-8a96-e4ca6315d644-kube-api-access-qk87l\") pod \"nova-scheduler-0\" (UID: \"cd7590f4-6fdc-450e-8a96-e4ca6315d644\") " pod="openstack/nova-scheduler-0" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.561006 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cd7590f4-6fdc-450e-8a96-e4ca6315d644-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cd7590f4-6fdc-450e-8a96-e4ca6315d644\") " pod="openstack/nova-scheduler-0" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.606525 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.617464 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hlz49"] Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.789338 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hlz49" event={"ID":"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586","Type":"ContainerStarted","Data":"80fad1e26b27c3f484b792b1636a74c459a2f86e7e5c0ae35cfb87516785bfd9"} Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.798381 5016 generic.go:334] "Generic (PLEG): container finished" podID="77f7fa0a-5732-4160-82e7-09b358f28403" containerID="a7e1bbc610e23616d90e2a7f69b97ba6b034a90e96aff1bd058bbd6ddb184a88" exitCode=143 Dec 11 10:59:32 crc kubenswrapper[5016]: I1211 10:59:32.798496 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"77f7fa0a-5732-4160-82e7-09b358f28403","Type":"ContainerDied","Data":"a7e1bbc610e23616d90e2a7f69b97ba6b034a90e96aff1bd058bbd6ddb184a88"} Dec 11 10:59:33 crc kubenswrapper[5016]: W1211 10:59:33.153526 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd7590f4_6fdc_450e_8a96_e4ca6315d644.slice/crio-65b5ee286c300ca7fa1574ff1e8430d68b606452a5cc6c5bf8f0119997b4716d WatchSource:0}: Error finding container 65b5ee286c300ca7fa1574ff1e8430d68b606452a5cc6c5bf8f0119997b4716d: Status 404 returned error can't find the container with id 65b5ee286c300ca7fa1574ff1e8430d68b606452a5cc6c5bf8f0119997b4716d Dec 11 10:59:33 crc kubenswrapper[5016]: I1211 10:59:33.157777 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 11 10:59:33 crc kubenswrapper[5016]: I1211 10:59:33.492696 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="166e24ea-2c2f-4503-a5de-4f0ceb9f31c9" path="/var/lib/kubelet/pods/166e24ea-2c2f-4503-a5de-4f0ceb9f31c9/volumes" Dec 11 10:59:33 crc kubenswrapper[5016]: I1211 10:59:33.813021 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cd7590f4-6fdc-450e-8a96-e4ca6315d644","Type":"ContainerStarted","Data":"540a105ea8ae6dd1411d08529faf6ace0274edbe920a2b974979b94e1651ab94"} Dec 11 10:59:33 crc kubenswrapper[5016]: I1211 10:59:33.813094 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cd7590f4-6fdc-450e-8a96-e4ca6315d644","Type":"ContainerStarted","Data":"65b5ee286c300ca7fa1574ff1e8430d68b606452a5cc6c5bf8f0119997b4716d"} Dec 11 10:59:33 crc kubenswrapper[5016]: I1211 10:59:33.815113 5016 generic.go:334] "Generic (PLEG): container finished" podID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" containerID="95b4aa0b677b92252cbc54928a587c4cb31e1e9694c0503f84f3e117d2b34080" exitCode=0 Dec 11 10:59:33 crc kubenswrapper[5016]: I1211 10:59:33.815191 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ac8ef276-d90a-4d13-b8b1-fc452060abfb","Type":"ContainerDied","Data":"95b4aa0b677b92252cbc54928a587c4cb31e1e9694c0503f84f3e117d2b34080"} Dec 11 10:59:33 crc kubenswrapper[5016]: 
I1211 10:59:33.816841 5016 generic.go:334] "Generic (PLEG): container finished" podID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" containerID="84e0d571b68e4a33a5635ca8d2b31669cb391a85c760b767f179d9b037e065ff" exitCode=0 Dec 11 10:59:33 crc kubenswrapper[5016]: I1211 10:59:33.816875 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hlz49" event={"ID":"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586","Type":"ContainerDied","Data":"84e0d571b68e4a33a5635ca8d2b31669cb391a85c760b767f179d9b037e065ff"} Dec 11 10:59:33 crc kubenswrapper[5016]: I1211 10:59:33.884832 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.8848009970000001 podStartE2EDuration="1.884800997s" podCreationTimestamp="2025-12-11 10:59:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:59:33.856978066 +0000 UTC m=+1490.675537685" watchObservedRunningTime="2025-12-11 10:59:33.884800997 +0000 UTC m=+1490.703360576" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.278238 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.380117 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-config-data\") pod \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.380389 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-internal-tls-certs\") pod \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.380440 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-combined-ca-bundle\") pod \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.380460 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-public-tls-certs\") pod \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.380554 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac8ef276-d90a-4d13-b8b1-fc452060abfb-logs\") pod \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.381221 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mczmp\" (UniqueName: \"kubernetes.io/projected/ac8ef276-d90a-4d13-b8b1-fc452060abfb-kube-api-access-mczmp\") pod \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\" (UID: \"ac8ef276-d90a-4d13-b8b1-fc452060abfb\") " Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.382014 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/ac8ef276-d90a-4d13-b8b1-fc452060abfb-logs" (OuterVolumeSpecName: "logs") pod "ac8ef276-d90a-4d13-b8b1-fc452060abfb" (UID: "ac8ef276-d90a-4d13-b8b1-fc452060abfb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.404649 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac8ef276-d90a-4d13-b8b1-fc452060abfb-kube-api-access-mczmp" (OuterVolumeSpecName: "kube-api-access-mczmp") pod "ac8ef276-d90a-4d13-b8b1-fc452060abfb" (UID: "ac8ef276-d90a-4d13-b8b1-fc452060abfb"). InnerVolumeSpecName "kube-api-access-mczmp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.429364 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-config-data" (OuterVolumeSpecName: "config-data") pod "ac8ef276-d90a-4d13-b8b1-fc452060abfb" (UID: "ac8ef276-d90a-4d13-b8b1-fc452060abfb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.443412 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ac8ef276-d90a-4d13-b8b1-fc452060abfb" (UID: "ac8ef276-d90a-4d13-b8b1-fc452060abfb"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.444584 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ac8ef276-d90a-4d13-b8b1-fc452060abfb" (UID: "ac8ef276-d90a-4d13-b8b1-fc452060abfb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.474238 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ac8ef276-d90a-4d13-b8b1-fc452060abfb" (UID: "ac8ef276-d90a-4d13-b8b1-fc452060abfb"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.483358 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.483401 5016 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.483415 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.483427 5016 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac8ef276-d90a-4d13-b8b1-fc452060abfb-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.483441 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac8ef276-d90a-4d13-b8b1-fc452060abfb-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.483453 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mczmp\" (UniqueName: \"kubernetes.io/projected/ac8ef276-d90a-4d13-b8b1-fc452060abfb-kube-api-access-mczmp\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.832483 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.832549 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ac8ef276-d90a-4d13-b8b1-fc452060abfb","Type":"ContainerDied","Data":"40f89a2c181cf223e4cd349d8acedcc1880653d86b93698f279bedf1292edac5"} Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.833420 5016 scope.go:117] "RemoveContainer" containerID="95b4aa0b677b92252cbc54928a587c4cb31e1e9694c0503f84f3e117d2b34080" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.837112 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hlz49" event={"ID":"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586","Type":"ContainerStarted","Data":"a6ac41fb4bbed53823e9f6cd8727862dfe04d65d6755135ff26042d5204ed297"} Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.873566 5016 scope.go:117] "RemoveContainer" containerID="af91ba04309a4f7ea6376546512934134eecb3fe819c90e3737c5eb17bc53fd4" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.900923 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.926154 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.948409 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 11 10:59:34 crc kubenswrapper[5016]: E1211 10:59:34.949612 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" containerName="nova-api-api" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.949636 5016 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" containerName="nova-api-api" Dec 11 10:59:34 crc kubenswrapper[5016]: E1211 10:59:34.949680 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" containerName="nova-api-log" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.949689 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" containerName="nova-api-log" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.950291 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" containerName="nova-api-api" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.950338 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" containerName="nova-api-log" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.952740 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.961270 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.963152 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.963438 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 11 10:59:34 crc kubenswrapper[5016]: I1211 10:59:34.989398 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.029931 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": read tcp 10.217.0.2:47160->10.217.0.193:8775: read: connection reset by peer" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.029851 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": read tcp 10.217.0.2:47176->10.217.0.193:8775: read: connection reset by peer" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.099358 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e23cc12c-b028-49ec-ba40-adb9ad2baf59-logs\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.099456 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8thnc\" (UniqueName: \"kubernetes.io/projected/e23cc12c-b028-49ec-ba40-adb9ad2baf59-kube-api-access-8thnc\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.099498 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e23cc12c-b028-49ec-ba40-adb9ad2baf59-config-data\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc 
kubenswrapper[5016]: I1211 10:59:35.099533 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e23cc12c-b028-49ec-ba40-adb9ad2baf59-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.100645 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cc12c-b028-49ec-ba40-adb9ad2baf59-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.100731 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cc12c-b028-49ec-ba40-adb9ad2baf59-public-tls-certs\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.202624 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e23cc12c-b028-49ec-ba40-adb9ad2baf59-logs\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.202733 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8thnc\" (UniqueName: \"kubernetes.io/projected/e23cc12c-b028-49ec-ba40-adb9ad2baf59-kube-api-access-8thnc\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.202771 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e23cc12c-b028-49ec-ba40-adb9ad2baf59-config-data\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.202804 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e23cc12c-b028-49ec-ba40-adb9ad2baf59-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.202838 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cc12c-b028-49ec-ba40-adb9ad2baf59-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.202874 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cc12c-b028-49ec-ba40-adb9ad2baf59-public-tls-certs\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.204386 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e23cc12c-b028-49ec-ba40-adb9ad2baf59-logs\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 
10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.208031 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e23cc12c-b028-49ec-ba40-adb9ad2baf59-config-data\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.208169 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cc12c-b028-49ec-ba40-adb9ad2baf59-public-tls-certs\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.208590 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e23cc12c-b028-49ec-ba40-adb9ad2baf59-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.208965 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e23cc12c-b028-49ec-ba40-adb9ad2baf59-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.221373 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8thnc\" (UniqueName: \"kubernetes.io/projected/e23cc12c-b028-49ec-ba40-adb9ad2baf59-kube-api-access-8thnc\") pod \"nova-api-0\" (UID: \"e23cc12c-b028-49ec-ba40-adb9ad2baf59\") " pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.304379 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.488878 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac8ef276-d90a-4d13-b8b1-fc452060abfb" path="/var/lib/kubelet/pods/ac8ef276-d90a-4d13-b8b1-fc452060abfb/volumes" Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.867410 5016 generic.go:334] "Generic (PLEG): container finished" podID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" containerID="a6ac41fb4bbed53823e9f6cd8727862dfe04d65d6755135ff26042d5204ed297" exitCode=0 Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.867963 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hlz49" event={"ID":"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586","Type":"ContainerDied","Data":"a6ac41fb4bbed53823e9f6cd8727862dfe04d65d6755135ff26042d5204ed297"} Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.880248 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.883263 5016 generic.go:334] "Generic (PLEG): container finished" podID="77f7fa0a-5732-4160-82e7-09b358f28403" containerID="27743bda5dfb69f5a4a21201197e2774ef32201f52e12c9c3d2039ca7624fa7d" exitCode=0 Dec 11 10:59:35 crc kubenswrapper[5016]: I1211 10:59:35.883360 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"77f7fa0a-5732-4160-82e7-09b358f28403","Type":"ContainerDied","Data":"27743bda5dfb69f5a4a21201197e2774ef32201f52e12c9c3d2039ca7624fa7d"} Dec 11 10:59:35 crc kubenswrapper[5016]: W1211 10:59:35.911339 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode23cc12c_b028_49ec_ba40_adb9ad2baf59.slice/crio-5682505f4db7676ec5b1d153cad4893b051c6f302e5520ea3c547be42b8e280a WatchSource:0}: Error finding container 5682505f4db7676ec5b1d153cad4893b051c6f302e5520ea3c547be42b8e280a: Status 404 returned error can't find the container with id 5682505f4db7676ec5b1d153cad4893b051c6f302e5520ea3c547be42b8e280a Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.297473 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.435824 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-combined-ca-bundle\") pod \"77f7fa0a-5732-4160-82e7-09b358f28403\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.435975 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77f7fa0a-5732-4160-82e7-09b358f28403-logs\") pod \"77f7fa0a-5732-4160-82e7-09b358f28403\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.436030 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ggnd7\" (UniqueName: \"kubernetes.io/projected/77f7fa0a-5732-4160-82e7-09b358f28403-kube-api-access-ggnd7\") pod \"77f7fa0a-5732-4160-82e7-09b358f28403\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.436058 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-config-data\") pod \"77f7fa0a-5732-4160-82e7-09b358f28403\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.436167 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-nova-metadata-tls-certs\") pod \"77f7fa0a-5732-4160-82e7-09b358f28403\" (UID: \"77f7fa0a-5732-4160-82e7-09b358f28403\") " Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.437794 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77f7fa0a-5732-4160-82e7-09b358f28403-logs" (OuterVolumeSpecName: "logs") pod "77f7fa0a-5732-4160-82e7-09b358f28403" (UID: "77f7fa0a-5732-4160-82e7-09b358f28403"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.488179 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77f7fa0a-5732-4160-82e7-09b358f28403-kube-api-access-ggnd7" (OuterVolumeSpecName: "kube-api-access-ggnd7") pod "77f7fa0a-5732-4160-82e7-09b358f28403" (UID: "77f7fa0a-5732-4160-82e7-09b358f28403"). InnerVolumeSpecName "kube-api-access-ggnd7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.539706 5016 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77f7fa0a-5732-4160-82e7-09b358f28403-logs\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.539744 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ggnd7\" (UniqueName: \"kubernetes.io/projected/77f7fa0a-5732-4160-82e7-09b358f28403-kube-api-access-ggnd7\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.562008 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-config-data" (OuterVolumeSpecName: "config-data") pod "77f7fa0a-5732-4160-82e7-09b358f28403" (UID: "77f7fa0a-5732-4160-82e7-09b358f28403"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.588266 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77f7fa0a-5732-4160-82e7-09b358f28403" (UID: "77f7fa0a-5732-4160-82e7-09b358f28403"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.608351 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "77f7fa0a-5732-4160-82e7-09b358f28403" (UID: "77f7fa0a-5732-4160-82e7-09b358f28403"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.645816 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.645868 5016 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.645884 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77f7fa0a-5732-4160-82e7-09b358f28403-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.901861 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e23cc12c-b028-49ec-ba40-adb9ad2baf59","Type":"ContainerStarted","Data":"c84136c66a26afaceed0d488ad2168a200c8f74f2df884e5606b534486f3b2de"} Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.903281 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e23cc12c-b028-49ec-ba40-adb9ad2baf59","Type":"ContainerStarted","Data":"5682505f4db7676ec5b1d153cad4893b051c6f302e5520ea3c547be42b8e280a"} Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.908173 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"77f7fa0a-5732-4160-82e7-09b358f28403","Type":"ContainerDied","Data":"618525b4f7f1471341327faec930dfc425f7f7d151a69a49d257a578c070cc5e"} Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.908258 5016 scope.go:117] "RemoveContainer" containerID="27743bda5dfb69f5a4a21201197e2774ef32201f52e12c9c3d2039ca7624fa7d" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.908419 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.942845 5016 scope.go:117] "RemoveContainer" containerID="a7e1bbc610e23616d90e2a7f69b97ba6b034a90e96aff1bd058bbd6ddb184a88" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.964195 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.979526 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.996196 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:59:36 crc kubenswrapper[5016]: E1211 10:59:36.996959 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" containerName="nova-metadata-log" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.996991 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" containerName="nova-metadata-log" Dec 11 10:59:36 crc kubenswrapper[5016]: E1211 10:59:36.997041 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" containerName="nova-metadata-metadata" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.997051 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" containerName="nova-metadata-metadata" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.997327 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" containerName="nova-metadata-metadata" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.997373 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" containerName="nova-metadata-log" Dec 11 10:59:36 crc kubenswrapper[5016]: I1211 10:59:36.998958 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.001811 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.002131 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.013789 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.165366 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b74b056a-931e-4c8f-809d-025693ae2e9c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.165685 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b74b056a-931e-4c8f-809d-025693ae2e9c-logs\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.165950 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b74b056a-931e-4c8f-809d-025693ae2e9c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.166146 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4j8k4\" (UniqueName: \"kubernetes.io/projected/b74b056a-931e-4c8f-809d-025693ae2e9c-kube-api-access-4j8k4\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.166225 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b74b056a-931e-4c8f-809d-025693ae2e9c-config-data\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.268732 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b74b056a-931e-4c8f-809d-025693ae2e9c-logs\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.269242 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b74b056a-931e-4c8f-809d-025693ae2e9c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.269300 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4j8k4\" (UniqueName: \"kubernetes.io/projected/b74b056a-931e-4c8f-809d-025693ae2e9c-kube-api-access-4j8k4\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " 
pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.269326 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b74b056a-931e-4c8f-809d-025693ae2e9c-config-data\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.269379 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b74b056a-931e-4c8f-809d-025693ae2e9c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.269530 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b74b056a-931e-4c8f-809d-025693ae2e9c-logs\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.274540 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b74b056a-931e-4c8f-809d-025693ae2e9c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.275492 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b74b056a-931e-4c8f-809d-025693ae2e9c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.276603 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b74b056a-931e-4c8f-809d-025693ae2e9c-config-data\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.289490 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4j8k4\" (UniqueName: \"kubernetes.io/projected/b74b056a-931e-4c8f-809d-025693ae2e9c-kube-api-access-4j8k4\") pod \"nova-metadata-0\" (UID: \"b74b056a-931e-4c8f-809d-025693ae2e9c\") " pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.319815 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.512925 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77f7fa0a-5732-4160-82e7-09b358f28403" path="/var/lib/kubelet/pods/77f7fa0a-5732-4160-82e7-09b358f28403/volumes" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.606858 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.854680 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.947219 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hlz49" event={"ID":"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586","Type":"ContainerStarted","Data":"ec172dfbb2a0fa8886d8a9d700388d7fe5314c8bf4a679cd044b8430644c256b"} Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.951948 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e23cc12c-b028-49ec-ba40-adb9ad2baf59","Type":"ContainerStarted","Data":"d927bc7f3171a870d0e65d06a28bc2b84019b73ef28b3219884cc2327d200a08"} Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.953350 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b74b056a-931e-4c8f-809d-025693ae2e9c","Type":"ContainerStarted","Data":"27dcbc760c8ec549668313f9dd3f110cbcf20ea2f41fe9e7875348105c9371d0"} Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.970660 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hlz49" podStartSLOduration=4.060614521 podStartE2EDuration="6.970643076s" podCreationTimestamp="2025-12-11 10:59:31 +0000 UTC" firstStartedPulling="2025-12-11 10:59:33.818793415 +0000 UTC m=+1490.637352994" lastFinishedPulling="2025-12-11 10:59:36.72882197 +0000 UTC m=+1493.547381549" observedRunningTime="2025-12-11 10:59:37.968979616 +0000 UTC m=+1494.787539195" watchObservedRunningTime="2025-12-11 10:59:37.970643076 +0000 UTC m=+1494.789202655" Dec 11 10:59:37 crc kubenswrapper[5016]: I1211 10:59:37.998993 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.99897043 podStartE2EDuration="3.99897043s" podCreationTimestamp="2025-12-11 10:59:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:59:37.996038019 +0000 UTC m=+1494.814597618" watchObservedRunningTime="2025-12-11 10:59:37.99897043 +0000 UTC m=+1494.817530019" Dec 11 10:59:38 crc kubenswrapper[5016]: I1211 10:59:38.966134 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b74b056a-931e-4c8f-809d-025693ae2e9c","Type":"ContainerStarted","Data":"9159eb41c4d861cc09861097f2d435d28fc82cd120cdfd9677b36b50370dede6"} Dec 11 10:59:38 crc kubenswrapper[5016]: I1211 10:59:38.966391 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b74b056a-931e-4c8f-809d-025693ae2e9c","Type":"ContainerStarted","Data":"a192388120659f8da9052582b2850f4bceb3fc119bae1fa787f761f8cce91fa7"} Dec 11 10:59:38 crc kubenswrapper[5016]: I1211 10:59:38.994641 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.994621785 
podStartE2EDuration="2.994621785s" podCreationTimestamp="2025-12-11 10:59:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 10:59:38.994254107 +0000 UTC m=+1495.812813686" watchObservedRunningTime="2025-12-11 10:59:38.994621785 +0000 UTC m=+1495.813181364" Dec 11 10:59:41 crc kubenswrapper[5016]: I1211 10:59:41.832231 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:41 crc kubenswrapper[5016]: I1211 10:59:41.833119 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:42 crc kubenswrapper[5016]: I1211 10:59:42.321238 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 11 10:59:42 crc kubenswrapper[5016]: I1211 10:59:42.321298 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 11 10:59:42 crc kubenswrapper[5016]: I1211 10:59:42.607117 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 11 10:59:42 crc kubenswrapper[5016]: I1211 10:59:42.641113 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 11 10:59:42 crc kubenswrapper[5016]: I1211 10:59:42.884767 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hlz49" podUID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" containerName="registry-server" probeResult="failure" output=< Dec 11 10:59:42 crc kubenswrapper[5016]: timeout: failed to connect service ":50051" within 1s Dec 11 10:59:42 crc kubenswrapper[5016]: > Dec 11 10:59:42 crc kubenswrapper[5016]: I1211 10:59:42.932850 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 10:59:42 crc kubenswrapper[5016]: I1211 10:59:42.932911 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 10:59:42 crc kubenswrapper[5016]: I1211 10:59:42.932990 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 10:59:42 crc kubenswrapper[5016]: I1211 10:59:42.933792 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"53da38b3e027c864a9592c4787654311b819c80dc57e5ec065e90c602166ceee"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 10:59:42 crc kubenswrapper[5016]: I1211 10:59:42.933859 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" 
containerID="cri-o://53da38b3e027c864a9592c4787654311b819c80dc57e5ec065e90c602166ceee" gracePeriod=600 Dec 11 10:59:43 crc kubenswrapper[5016]: I1211 10:59:43.061172 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 11 10:59:45 crc kubenswrapper[5016]: I1211 10:59:45.304883 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 11 10:59:45 crc kubenswrapper[5016]: I1211 10:59:45.305489 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 11 10:59:46 crc kubenswrapper[5016]: I1211 10:59:46.317218 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e23cc12c-b028-49ec-ba40-adb9ad2baf59" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.201:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 10:59:46 crc kubenswrapper[5016]: I1211 10:59:46.317262 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e23cc12c-b028-49ec-ba40-adb9ad2baf59" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.201:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 10:59:47 crc kubenswrapper[5016]: I1211 10:59:47.048589 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="53da38b3e027c864a9592c4787654311b819c80dc57e5ec065e90c602166ceee" exitCode=0 Dec 11 10:59:47 crc kubenswrapper[5016]: I1211 10:59:47.049440 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"53da38b3e027c864a9592c4787654311b819c80dc57e5ec065e90c602166ceee"} Dec 11 10:59:47 crc kubenswrapper[5016]: I1211 10:59:47.049498 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756"} Dec 11 10:59:47 crc kubenswrapper[5016]: I1211 10:59:47.049519 5016 scope.go:117] "RemoveContainer" containerID="49f5883716361ecf20e37d0a33857b58813542483a33785fbd7c2c019dd8b594" Dec 11 10:59:47 crc kubenswrapper[5016]: I1211 10:59:47.050575 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 11 10:59:47 crc kubenswrapper[5016]: I1211 10:59:47.322296 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 11 10:59:47 crc kubenswrapper[5016]: I1211 10:59:47.322345 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 11 10:59:48 crc kubenswrapper[5016]: I1211 10:59:48.336413 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="b74b056a-931e-4c8f-809d-025693ae2e9c" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 10:59:48 crc kubenswrapper[5016]: I1211 10:59:48.336439 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="b74b056a-931e-4c8f-809d-025693ae2e9c" containerName="nova-metadata-metadata" 
probeResult="failure" output="Get \"https://10.217.0.202:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 11 10:59:51 crc kubenswrapper[5016]: I1211 10:59:51.890127 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:51 crc kubenswrapper[5016]: I1211 10:59:51.944214 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:52 crc kubenswrapper[5016]: I1211 10:59:52.133920 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hlz49"] Dec 11 10:59:53 crc kubenswrapper[5016]: I1211 10:59:53.156909 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hlz49" podUID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" containerName="registry-server" containerID="cri-o://ec172dfbb2a0fa8886d8a9d700388d7fe5314c8bf4a679cd044b8430644c256b" gracePeriod=2 Dec 11 10:59:54 crc kubenswrapper[5016]: I1211 10:59:54.168024 5016 generic.go:334] "Generic (PLEG): container finished" podID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" containerID="ec172dfbb2a0fa8886d8a9d700388d7fe5314c8bf4a679cd044b8430644c256b" exitCode=0 Dec 11 10:59:54 crc kubenswrapper[5016]: I1211 10:59:54.168286 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hlz49" event={"ID":"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586","Type":"ContainerDied","Data":"ec172dfbb2a0fa8886d8a9d700388d7fe5314c8bf4a679cd044b8430644c256b"} Dec 11 10:59:54 crc kubenswrapper[5016]: I1211 10:59:54.432921 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:54 crc kubenswrapper[5016]: I1211 10:59:54.573078 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-utilities\") pod \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\" (UID: \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\") " Dec 11 10:59:54 crc kubenswrapper[5016]: I1211 10:59:54.573215 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-catalog-content\") pod \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\" (UID: \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\") " Dec 11 10:59:54 crc kubenswrapper[5016]: I1211 10:59:54.574162 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-utilities" (OuterVolumeSpecName: "utilities") pod "60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" (UID: "60bbcfaa-cf1d-4cef-a2e4-f79e7072b586"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:59:54 crc kubenswrapper[5016]: I1211 10:59:54.579355 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2szzr\" (UniqueName: \"kubernetes.io/projected/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-kube-api-access-2szzr\") pod \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\" (UID: \"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586\") " Dec 11 10:59:54 crc kubenswrapper[5016]: I1211 10:59:54.580336 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:54 crc kubenswrapper[5016]: I1211 10:59:54.586145 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-kube-api-access-2szzr" (OuterVolumeSpecName: "kube-api-access-2szzr") pod "60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" (UID: "60bbcfaa-cf1d-4cef-a2e4-f79e7072b586"). InnerVolumeSpecName "kube-api-access-2szzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 10:59:54 crc kubenswrapper[5016]: I1211 10:59:54.681382 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" (UID: "60bbcfaa-cf1d-4cef-a2e4-f79e7072b586"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 10:59:54 crc kubenswrapper[5016]: I1211 10:59:54.683084 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:54 crc kubenswrapper[5016]: I1211 10:59:54.683124 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2szzr\" (UniqueName: \"kubernetes.io/projected/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586-kube-api-access-2szzr\") on node \"crc\" DevicePath \"\"" Dec 11 10:59:55 crc kubenswrapper[5016]: I1211 10:59:55.180981 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hlz49" event={"ID":"60bbcfaa-cf1d-4cef-a2e4-f79e7072b586","Type":"ContainerDied","Data":"80fad1e26b27c3f484b792b1636a74c459a2f86e7e5c0ae35cfb87516785bfd9"} Dec 11 10:59:55 crc kubenswrapper[5016]: I1211 10:59:55.181467 5016 scope.go:117] "RemoveContainer" containerID="ec172dfbb2a0fa8886d8a9d700388d7fe5314c8bf4a679cd044b8430644c256b" Dec 11 10:59:55 crc kubenswrapper[5016]: I1211 10:59:55.181035 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hlz49" Dec 11 10:59:55 crc kubenswrapper[5016]: I1211 10:59:55.232185 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hlz49"] Dec 11 10:59:55 crc kubenswrapper[5016]: I1211 10:59:55.237146 5016 scope.go:117] "RemoveContainer" containerID="a6ac41fb4bbed53823e9f6cd8727862dfe04d65d6755135ff26042d5204ed297" Dec 11 10:59:55 crc kubenswrapper[5016]: I1211 10:59:55.242047 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hlz49"] Dec 11 10:59:55 crc kubenswrapper[5016]: I1211 10:59:55.263424 5016 scope.go:117] "RemoveContainer" containerID="84e0d571b68e4a33a5635ca8d2b31669cb391a85c760b767f179d9b037e065ff" Dec 11 10:59:55 crc kubenswrapper[5016]: I1211 10:59:55.313277 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 11 10:59:55 crc kubenswrapper[5016]: I1211 10:59:55.314816 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 11 10:59:55 crc kubenswrapper[5016]: I1211 10:59:55.314957 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 11 10:59:55 crc kubenswrapper[5016]: I1211 10:59:55.321639 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 11 10:59:55 crc kubenswrapper[5016]: I1211 10:59:55.485993 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" path="/var/lib/kubelet/pods/60bbcfaa-cf1d-4cef-a2e4-f79e7072b586/volumes" Dec 11 10:59:56 crc kubenswrapper[5016]: I1211 10:59:56.194515 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 11 10:59:56 crc kubenswrapper[5016]: I1211 10:59:56.201683 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 11 10:59:57 crc kubenswrapper[5016]: I1211 10:59:57.327377 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 11 10:59:57 crc kubenswrapper[5016]: I1211 10:59:57.327747 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 11 10:59:57 crc kubenswrapper[5016]: I1211 10:59:57.332351 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 11 10:59:57 crc kubenswrapper[5016]: I1211 10:59:57.332719 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.174804 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp"] Dec 11 11:00:00 crc kubenswrapper[5016]: E1211 11:00:00.175552 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" containerName="extract-utilities" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.175566 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" containerName="extract-utilities" Dec 11 11:00:00 crc kubenswrapper[5016]: E1211 11:00:00.175594 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" containerName="registry-server" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.175602 5016 
state_mem.go:107] "Deleted CPUSet assignment" podUID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" containerName="registry-server" Dec 11 11:00:00 crc kubenswrapper[5016]: E1211 11:00:00.175612 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" containerName="extract-content" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.175644 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" containerName="extract-content" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.175892 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="60bbcfaa-cf1d-4cef-a2e4-f79e7072b586" containerName="registry-server" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.176654 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.180342 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.180427 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.189964 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp"] Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.299357 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57tt7\" (UniqueName: \"kubernetes.io/projected/1172fa02-d3bf-4eb3-96f0-f9f224625c46-kube-api-access-57tt7\") pod \"collect-profiles-29424180-slbkp\" (UID: \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.299570 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1172fa02-d3bf-4eb3-96f0-f9f224625c46-config-volume\") pod \"collect-profiles-29424180-slbkp\" (UID: \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.299726 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1172fa02-d3bf-4eb3-96f0-f9f224625c46-secret-volume\") pod \"collect-profiles-29424180-slbkp\" (UID: \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.401604 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1172fa02-d3bf-4eb3-96f0-f9f224625c46-config-volume\") pod \"collect-profiles-29424180-slbkp\" (UID: \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.402447 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1172fa02-d3bf-4eb3-96f0-f9f224625c46-config-volume\") pod 
\"collect-profiles-29424180-slbkp\" (UID: \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.402580 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1172fa02-d3bf-4eb3-96f0-f9f224625c46-secret-volume\") pod \"collect-profiles-29424180-slbkp\" (UID: \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.402792 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57tt7\" (UniqueName: \"kubernetes.io/projected/1172fa02-d3bf-4eb3-96f0-f9f224625c46-kube-api-access-57tt7\") pod \"collect-profiles-29424180-slbkp\" (UID: \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.408635 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1172fa02-d3bf-4eb3-96f0-f9f224625c46-secret-volume\") pod \"collect-profiles-29424180-slbkp\" (UID: \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.423836 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57tt7\" (UniqueName: \"kubernetes.io/projected/1172fa02-d3bf-4eb3-96f0-f9f224625c46-kube-api-access-57tt7\") pod \"collect-profiles-29424180-slbkp\" (UID: \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.533272 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:00 crc kubenswrapper[5016]: I1211 11:00:00.946685 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp"] Dec 11 11:00:01 crc kubenswrapper[5016]: I1211 11:00:01.247350 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" event={"ID":"1172fa02-d3bf-4eb3-96f0-f9f224625c46","Type":"ContainerStarted","Data":"03965dfb551484cc6504c58ac9408103b6430b3bfebe9cb08712f3ecee1cbccc"} Dec 11 11:00:01 crc kubenswrapper[5016]: I1211 11:00:01.247406 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" event={"ID":"1172fa02-d3bf-4eb3-96f0-f9f224625c46","Type":"ContainerStarted","Data":"c9f7dbe35330522aa7406f7ffe66c189dd253d33ed5bc5d9f3594e05792182ce"} Dec 11 11:00:02 crc kubenswrapper[5016]: I1211 11:00:02.261226 5016 generic.go:334] "Generic (PLEG): container finished" podID="1172fa02-d3bf-4eb3-96f0-f9f224625c46" containerID="03965dfb551484cc6504c58ac9408103b6430b3bfebe9cb08712f3ecee1cbccc" exitCode=0 Dec 11 11:00:02 crc kubenswrapper[5016]: I1211 11:00:02.261314 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" event={"ID":"1172fa02-d3bf-4eb3-96f0-f9f224625c46","Type":"ContainerDied","Data":"03965dfb551484cc6504c58ac9408103b6430b3bfebe9cb08712f3ecee1cbccc"} Dec 11 11:00:03 crc kubenswrapper[5016]: I1211 11:00:03.613885 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:03 crc kubenswrapper[5016]: I1211 11:00:03.794107 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1172fa02-d3bf-4eb3-96f0-f9f224625c46-secret-volume\") pod \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\" (UID: \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\") " Dec 11 11:00:03 crc kubenswrapper[5016]: I1211 11:00:03.794478 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-57tt7\" (UniqueName: \"kubernetes.io/projected/1172fa02-d3bf-4eb3-96f0-f9f224625c46-kube-api-access-57tt7\") pod \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\" (UID: \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\") " Dec 11 11:00:03 crc kubenswrapper[5016]: I1211 11:00:03.794657 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1172fa02-d3bf-4eb3-96f0-f9f224625c46-config-volume\") pod \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\" (UID: \"1172fa02-d3bf-4eb3-96f0-f9f224625c46\") " Dec 11 11:00:03 crc kubenswrapper[5016]: I1211 11:00:03.795593 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1172fa02-d3bf-4eb3-96f0-f9f224625c46-config-volume" (OuterVolumeSpecName: "config-volume") pod "1172fa02-d3bf-4eb3-96f0-f9f224625c46" (UID: "1172fa02-d3bf-4eb3-96f0-f9f224625c46"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:03 crc kubenswrapper[5016]: I1211 11:00:03.801551 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1172fa02-d3bf-4eb3-96f0-f9f224625c46-kube-api-access-57tt7" (OuterVolumeSpecName: "kube-api-access-57tt7") pod "1172fa02-d3bf-4eb3-96f0-f9f224625c46" (UID: "1172fa02-d3bf-4eb3-96f0-f9f224625c46"). InnerVolumeSpecName "kube-api-access-57tt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:00:03 crc kubenswrapper[5016]: I1211 11:00:03.801548 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1172fa02-d3bf-4eb3-96f0-f9f224625c46-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1172fa02-d3bf-4eb3-96f0-f9f224625c46" (UID: "1172fa02-d3bf-4eb3-96f0-f9f224625c46"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:00:03 crc kubenswrapper[5016]: I1211 11:00:03.897283 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-57tt7\" (UniqueName: \"kubernetes.io/projected/1172fa02-d3bf-4eb3-96f0-f9f224625c46-kube-api-access-57tt7\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:03 crc kubenswrapper[5016]: I1211 11:00:03.897363 5016 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1172fa02-d3bf-4eb3-96f0-f9f224625c46-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:03 crc kubenswrapper[5016]: I1211 11:00:03.897375 5016 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1172fa02-d3bf-4eb3-96f0-f9f224625c46-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:04 crc kubenswrapper[5016]: I1211 11:00:04.284137 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" event={"ID":"1172fa02-d3bf-4eb3-96f0-f9f224625c46","Type":"ContainerDied","Data":"c9f7dbe35330522aa7406f7ffe66c189dd253d33ed5bc5d9f3594e05792182ce"} Dec 11 11:00:04 crc kubenswrapper[5016]: I1211 11:00:04.284201 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9f7dbe35330522aa7406f7ffe66c189dd253d33ed5bc5d9f3594e05792182ce" Dec 11 11:00:04 crc kubenswrapper[5016]: I1211 11:00:04.284198 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp" Dec 11 11:00:07 crc kubenswrapper[5016]: I1211 11:00:07.657705 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 11:00:08 crc kubenswrapper[5016]: I1211 11:00:08.715209 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 11:00:12 crc kubenswrapper[5016]: I1211 11:00:12.655683 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="e46a21b8-75eb-49ac-8d08-0acaaa8fac37" containerName="rabbitmq" containerID="cri-o://8a88195eb63f082092d6edb63948ca9def10cea29875ffaae2b346bb818ab3dd" gracePeriod=604796 Dec 11 11:00:14 crc kubenswrapper[5016]: I1211 11:00:14.171839 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="313107c9-4bb0-49ad-a67b-7f2e4ae09753" containerName="rabbitmq" containerID="cri-o://64b5accdc2fd0c92a017d393d7f08770227f2a59966376e256f9e6643bdc204d" gracePeriod=604795 Dec 11 11:00:19 crc kubenswrapper[5016]: I1211 11:00:19.429847 5016 generic.go:334] "Generic (PLEG): container finished" podID="e46a21b8-75eb-49ac-8d08-0acaaa8fac37" containerID="8a88195eb63f082092d6edb63948ca9def10cea29875ffaae2b346bb818ab3dd" exitCode=0 Dec 11 11:00:19 crc kubenswrapper[5016]: I1211 11:00:19.430044 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e46a21b8-75eb-49ac-8d08-0acaaa8fac37","Type":"ContainerDied","Data":"8a88195eb63f082092d6edb63948ca9def10cea29875ffaae2b346bb818ab3dd"} Dec 11 11:00:19 crc kubenswrapper[5016]: I1211 11:00:19.828399 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.005684 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wc4qd\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-kube-api-access-wc4qd\") pod \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.005820 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-erlang-cookie\") pod \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.006418 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "e46a21b8-75eb-49ac-8d08-0acaaa8fac37" (UID: "e46a21b8-75eb-49ac-8d08-0acaaa8fac37"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.007182 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-config-data\") pod \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.007362 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-server-conf\") pod \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.007503 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-tls\") pod \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.007539 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-confd\") pod \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.007616 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-pod-info\") pod \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.007668 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.007729 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-plugins-conf\") pod \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.007768 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-plugins\") pod \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.007802 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-erlang-cookie-secret\") pod \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\" (UID: \"e46a21b8-75eb-49ac-8d08-0acaaa8fac37\") " Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.008500 5016 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.009729 5016 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "e46a21b8-75eb-49ac-8d08-0acaaa8fac37" (UID: "e46a21b8-75eb-49ac-8d08-0acaaa8fac37"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.010119 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "e46a21b8-75eb-49ac-8d08-0acaaa8fac37" (UID: "e46a21b8-75eb-49ac-8d08-0acaaa8fac37"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.013760 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-kube-api-access-wc4qd" (OuterVolumeSpecName: "kube-api-access-wc4qd") pod "e46a21b8-75eb-49ac-8d08-0acaaa8fac37" (UID: "e46a21b8-75eb-49ac-8d08-0acaaa8fac37"). InnerVolumeSpecName "kube-api-access-wc4qd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.014270 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "persistence") pod "e46a21b8-75eb-49ac-8d08-0acaaa8fac37" (UID: "e46a21b8-75eb-49ac-8d08-0acaaa8fac37"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.015018 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "e46a21b8-75eb-49ac-8d08-0acaaa8fac37" (UID: "e46a21b8-75eb-49ac-8d08-0acaaa8fac37"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.015047 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "e46a21b8-75eb-49ac-8d08-0acaaa8fac37" (UID: "e46a21b8-75eb-49ac-8d08-0acaaa8fac37"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.019211 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-pod-info" (OuterVolumeSpecName: "pod-info") pod "e46a21b8-75eb-49ac-8d08-0acaaa8fac37" (UID: "e46a21b8-75eb-49ac-8d08-0acaaa8fac37"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.050122 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-config-data" (OuterVolumeSpecName: "config-data") pod "e46a21b8-75eb-49ac-8d08-0acaaa8fac37" (UID: "e46a21b8-75eb-49ac-8d08-0acaaa8fac37"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.089254 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-server-conf" (OuterVolumeSpecName: "server-conf") pod "e46a21b8-75eb-49ac-8d08-0acaaa8fac37" (UID: "e46a21b8-75eb-49ac-8d08-0acaaa8fac37"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.109903 5016 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-server-conf\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.109958 5016 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.109970 5016 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-pod-info\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.109994 5016 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.110003 5016 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.110011 5016 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.110021 5016 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.110030 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wc4qd\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-kube-api-access-wc4qd\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.110040 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.143259 5016 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.165731 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "e46a21b8-75eb-49ac-8d08-0acaaa8fac37" (UID: "e46a21b8-75eb-49ac-8d08-0acaaa8fac37"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.212061 5016 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e46a21b8-75eb-49ac-8d08-0acaaa8fac37-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.212548 5016 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.442051 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e46a21b8-75eb-49ac-8d08-0acaaa8fac37","Type":"ContainerDied","Data":"fc67a03497d7436736f45657f91ac05cb4cef827178fa748edab2439c225cd75"} Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.442125 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.442132 5016 scope.go:117] "RemoveContainer" containerID="8a88195eb63f082092d6edb63948ca9def10cea29875ffaae2b346bb818ab3dd" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.469886 5016 scope.go:117] "RemoveContainer" containerID="ccccf842dadabad37bad8683166c7169076d2baa22d9ea9bc6e44216e5739d4e" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.503097 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.522869 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.545839 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 11:00:20 crc kubenswrapper[5016]: E1211 11:00:20.546547 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e46a21b8-75eb-49ac-8d08-0acaaa8fac37" containerName="rabbitmq" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.546577 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e46a21b8-75eb-49ac-8d08-0acaaa8fac37" containerName="rabbitmq" Dec 11 11:00:20 crc kubenswrapper[5016]: E1211 11:00:20.546614 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e46a21b8-75eb-49ac-8d08-0acaaa8fac37" containerName="setup-container" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.546625 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e46a21b8-75eb-49ac-8d08-0acaaa8fac37" containerName="setup-container" Dec 11 11:00:20 crc kubenswrapper[5016]: E1211 11:00:20.546649 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1172fa02-d3bf-4eb3-96f0-f9f224625c46" containerName="collect-profiles" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.546657 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1172fa02-d3bf-4eb3-96f0-f9f224625c46" containerName="collect-profiles" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.546926 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="e46a21b8-75eb-49ac-8d08-0acaaa8fac37" containerName="rabbitmq" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.546980 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1172fa02-d3bf-4eb3-96f0-f9f224625c46" containerName="collect-profiles" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.548605 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.551766 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.556984 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.557368 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.557439 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.557583 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-92vt9" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.557753 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.557885 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.558046 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.724879 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhkkh\" (UniqueName: \"kubernetes.io/projected/86d18250-4387-46f7-af2c-2ce21bf43e12-kube-api-access-vhkkh\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.724987 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/86d18250-4387-46f7-af2c-2ce21bf43e12-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.725032 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/86d18250-4387-46f7-af2c-2ce21bf43e12-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.725129 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.725179 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/86d18250-4387-46f7-af2c-2ce21bf43e12-server-conf\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.725205 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/86d18250-4387-46f7-af2c-2ce21bf43e12-pod-info\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.725221 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/86d18250-4387-46f7-af2c-2ce21bf43e12-config-data\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.725252 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/86d18250-4387-46f7-af2c-2ce21bf43e12-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.725281 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/86d18250-4387-46f7-af2c-2ce21bf43e12-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.725307 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/86d18250-4387-46f7-af2c-2ce21bf43e12-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.725375 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/86d18250-4387-46f7-af2c-2ce21bf43e12-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.827131 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhkkh\" (UniqueName: \"kubernetes.io/projected/86d18250-4387-46f7-af2c-2ce21bf43e12-kube-api-access-vhkkh\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.827210 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/86d18250-4387-46f7-af2c-2ce21bf43e12-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.827238 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/86d18250-4387-46f7-af2c-2ce21bf43e12-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.827291 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") 
" pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.827325 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/86d18250-4387-46f7-af2c-2ce21bf43e12-server-conf\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.827348 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/86d18250-4387-46f7-af2c-2ce21bf43e12-pod-info\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.827365 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/86d18250-4387-46f7-af2c-2ce21bf43e12-config-data\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.827394 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/86d18250-4387-46f7-af2c-2ce21bf43e12-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.827426 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/86d18250-4387-46f7-af2c-2ce21bf43e12-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.827466 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/86d18250-4387-46f7-af2c-2ce21bf43e12-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.827536 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/86d18250-4387-46f7-af2c-2ce21bf43e12-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.828178 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/86d18250-4387-46f7-af2c-2ce21bf43e12-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.828582 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/86d18250-4387-46f7-af2c-2ce21bf43e12-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.828720 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.829311 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/86d18250-4387-46f7-af2c-2ce21bf43e12-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.829485 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/86d18250-4387-46f7-af2c-2ce21bf43e12-config-data\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.829619 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/86d18250-4387-46f7-af2c-2ce21bf43e12-server-conf\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.834115 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/86d18250-4387-46f7-af2c-2ce21bf43e12-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.834774 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/86d18250-4387-46f7-af2c-2ce21bf43e12-pod-info\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.835158 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/86d18250-4387-46f7-af2c-2ce21bf43e12-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.849754 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhkkh\" (UniqueName: \"kubernetes.io/projected/86d18250-4387-46f7-af2c-2ce21bf43e12-kube-api-access-vhkkh\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.856018 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/86d18250-4387-46f7-af2c-2ce21bf43e12-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.869748 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"86d18250-4387-46f7-af2c-2ce21bf43e12\") " pod="openstack/rabbitmq-server-0" Dec 11 11:00:20 crc kubenswrapper[5016]: I1211 11:00:20.909171 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.429579 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.474004 5016 generic.go:334] "Generic (PLEG): container finished" podID="313107c9-4bb0-49ad-a67b-7f2e4ae09753" containerID="64b5accdc2fd0c92a017d393d7f08770227f2a59966376e256f9e6643bdc204d" exitCode=0 Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.493690 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e46a21b8-75eb-49ac-8d08-0acaaa8fac37" path="/var/lib/kubelet/pods/e46a21b8-75eb-49ac-8d08-0acaaa8fac37/volumes" Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.499296 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"313107c9-4bb0-49ad-a67b-7f2e4ae09753","Type":"ContainerDied","Data":"64b5accdc2fd0c92a017d393d7f08770227f2a59966376e256f9e6643bdc204d"} Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.499366 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"86d18250-4387-46f7-af2c-2ce21bf43e12","Type":"ContainerStarted","Data":"34949d0d4cfdc766c0e418778cf30d4ff9c68feb08373c3e5fd232ad08428ee4"} Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.540118 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="313107c9-4bb0-49ad-a67b-7f2e4ae09753" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.97:5671: connect: connection refused" Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.879582 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.961238 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/313107c9-4bb0-49ad-a67b-7f2e4ae09753-erlang-cookie-secret\") pod \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.961303 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-erlang-cookie\") pod \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.961337 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-config-data\") pod \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.961414 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqp4d\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-kube-api-access-tqp4d\") pod \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.961518 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-plugins\") pod 
\"313107c9-4bb0-49ad-a67b-7f2e4ae09753\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.961551 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.961616 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-plugins-conf\") pod \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.961690 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-tls\") pod \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.961739 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-confd\") pod \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.961769 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-server-conf\") pod \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.961832 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/313107c9-4bb0-49ad-a67b-7f2e4ae09753-pod-info\") pod \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\" (UID: \"313107c9-4bb0-49ad-a67b-7f2e4ae09753\") " Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.964237 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "313107c9-4bb0-49ad-a67b-7f2e4ae09753" (UID: "313107c9-4bb0-49ad-a67b-7f2e4ae09753"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.964624 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "313107c9-4bb0-49ad-a67b-7f2e4ae09753" (UID: "313107c9-4bb0-49ad-a67b-7f2e4ae09753"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.966120 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "313107c9-4bb0-49ad-a67b-7f2e4ae09753" (UID: "313107c9-4bb0-49ad-a67b-7f2e4ae09753"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.974073 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/313107c9-4bb0-49ad-a67b-7f2e4ae09753-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "313107c9-4bb0-49ad-a67b-7f2e4ae09753" (UID: "313107c9-4bb0-49ad-a67b-7f2e4ae09753"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.975139 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "313107c9-4bb0-49ad-a67b-7f2e4ae09753" (UID: "313107c9-4bb0-49ad-a67b-7f2e4ae09753"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.976050 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/313107c9-4bb0-49ad-a67b-7f2e4ae09753-pod-info" (OuterVolumeSpecName: "pod-info") pod "313107c9-4bb0-49ad-a67b-7f2e4ae09753" (UID: "313107c9-4bb0-49ad-a67b-7f2e4ae09753"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.976184 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "313107c9-4bb0-49ad-a67b-7f2e4ae09753" (UID: "313107c9-4bb0-49ad-a67b-7f2e4ae09753"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:00:21 crc kubenswrapper[5016]: I1211 11:00:21.991620 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-kube-api-access-tqp4d" (OuterVolumeSpecName: "kube-api-access-tqp4d") pod "313107c9-4bb0-49ad-a67b-7f2e4ae09753" (UID: "313107c9-4bb0-49ad-a67b-7f2e4ae09753"). InnerVolumeSpecName "kube-api-access-tqp4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.005344 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-config-data" (OuterVolumeSpecName: "config-data") pod "313107c9-4bb0-49ad-a67b-7f2e4ae09753" (UID: "313107c9-4bb0-49ad-a67b-7f2e4ae09753"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.035749 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-server-conf" (OuterVolumeSpecName: "server-conf") pod "313107c9-4bb0-49ad-a67b-7f2e4ae09753" (UID: "313107c9-4bb0-49ad-a67b-7f2e4ae09753"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.066298 5016 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.066341 5016 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.066352 5016 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.066364 5016 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-server-conf\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.066374 5016 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/313107c9-4bb0-49ad-a67b-7f2e4ae09753-pod-info\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.066383 5016 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/313107c9-4bb0-49ad-a67b-7f2e4ae09753-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.066393 5016 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.066409 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/313107c9-4bb0-49ad-a67b-7f2e4ae09753-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.066419 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqp4d\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-kube-api-access-tqp4d\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.066427 5016 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.098901 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "313107c9-4bb0-49ad-a67b-7f2e4ae09753" (UID: "313107c9-4bb0-49ad-a67b-7f2e4ae09753"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.107195 5016 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.168310 5016 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.168358 5016 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/313107c9-4bb0-49ad-a67b-7f2e4ae09753-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.388996 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-d558885bc-2rt6f"] Dec 11 11:00:22 crc kubenswrapper[5016]: E1211 11:00:22.390286 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="313107c9-4bb0-49ad-a67b-7f2e4ae09753" containerName="rabbitmq" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.390314 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="313107c9-4bb0-49ad-a67b-7f2e4ae09753" containerName="rabbitmq" Dec 11 11:00:22 crc kubenswrapper[5016]: E1211 11:00:22.390328 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="313107c9-4bb0-49ad-a67b-7f2e4ae09753" containerName="setup-container" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.390335 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="313107c9-4bb0-49ad-a67b-7f2e4ae09753" containerName="setup-container" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.390592 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="313107c9-4bb0-49ad-a67b-7f2e4ae09753" containerName="rabbitmq" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.392108 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.394766 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.411130 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-2rt6f"] Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.474982 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-dns-svc\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.475091 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.475123 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.475413 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.475489 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.475540 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7q26\" (UniqueName: \"kubernetes.io/projected/b1dea00e-68ff-473b-a047-48afed50a533-kube-api-access-z7q26\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.475805 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-config\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.500744 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"313107c9-4bb0-49ad-a67b-7f2e4ae09753","Type":"ContainerDied","Data":"228a9b2d1092b574ec0f1d31270adf9b9790ab5d10c878fe75f0ba9b867ffff5"} 
Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.501059 5016 scope.go:117] "RemoveContainer" containerID="64b5accdc2fd0c92a017d393d7f08770227f2a59966376e256f9e6643bdc204d" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.501257 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.532341 5016 scope.go:117] "RemoveContainer" containerID="b2f2efa6faa297b8f0bc3ba17f76e2c24b2691d9c6af06345d0d0383aaacc499" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.555469 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.568215 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.579372 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-dns-svc\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.580744 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-dns-svc\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.581685 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.581794 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.582046 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.582096 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.582213 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7q26\" (UniqueName: \"kubernetes.io/projected/b1dea00e-68ff-473b-a047-48afed50a533-kube-api-access-z7q26\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " 
pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.582697 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-config\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.583155 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.583205 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.584344 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.584443 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.584959 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-config\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.586200 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.586220 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.589477 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.589880 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.590099 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.591135 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.591330 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.592499 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.592913 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-t7pn9" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.616355 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.630995 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7q26\" (UniqueName: \"kubernetes.io/projected/b1dea00e-68ff-473b-a047-48afed50a533-kube-api-access-z7q26\") pod \"dnsmasq-dns-d558885bc-2rt6f\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.720445 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.720546 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.720776 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.721183 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt72x\" (UniqueName: \"kubernetes.io/projected/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-kube-api-access-qt72x\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.721263 5016 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.721781 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.721843 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.721986 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.722048 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.722297 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.722418 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.810779 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.825102 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.825218 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.825303 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.825349 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.825392 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.825452 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.825507 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.825555 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.825666 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.825737 5016 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt72x\" (UniqueName: \"kubernetes.io/projected/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-kube-api-access-qt72x\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.825778 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.826885 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.827346 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.827523 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.830527 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.830594 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.830862 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.833863 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.834054 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.837759 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.841466 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.857732 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qt72x\" (UniqueName: \"kubernetes.io/projected/24d5919d-ee3d-4023-9a6b-bc1d9838b2ce-kube-api-access-qt72x\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:22 crc kubenswrapper[5016]: I1211 11:00:22.879625 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce\") " pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:23 crc kubenswrapper[5016]: I1211 11:00:23.066848 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:00:23 crc kubenswrapper[5016]: I1211 11:00:23.351256 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-2rt6f"] Dec 11 11:00:23 crc kubenswrapper[5016]: I1211 11:00:23.489454 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="313107c9-4bb0-49ad-a67b-7f2e4ae09753" path="/var/lib/kubelet/pods/313107c9-4bb0-49ad-a67b-7f2e4ae09753/volumes" Dec 11 11:00:23 crc kubenswrapper[5016]: I1211 11:00:23.531552 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" event={"ID":"b1dea00e-68ff-473b-a047-48afed50a533","Type":"ContainerStarted","Data":"bc06dcfd9c8ffe6804b8a43306b11cdb1b34dfe8b2bd2bcff189f6c307771485"} Dec 11 11:00:23 crc kubenswrapper[5016]: I1211 11:00:23.582273 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 11 11:00:24 crc kubenswrapper[5016]: I1211 11:00:24.544441 5016 generic.go:334] "Generic (PLEG): container finished" podID="b1dea00e-68ff-473b-a047-48afed50a533" containerID="c813ca13044229140a533b466b65c6e4cbc98fec020a5747cd873cd05d4aa0c0" exitCode=0 Dec 11 11:00:24 crc kubenswrapper[5016]: I1211 11:00:24.544653 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" event={"ID":"b1dea00e-68ff-473b-a047-48afed50a533","Type":"ContainerDied","Data":"c813ca13044229140a533b466b65c6e4cbc98fec020a5747cd873cd05d4aa0c0"} Dec 11 11:00:24 crc kubenswrapper[5016]: I1211 11:00:24.548144 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"86d18250-4387-46f7-af2c-2ce21bf43e12","Type":"ContainerStarted","Data":"3d1f76a753b05e3e46a4d198024aa587761432e28e6ba208366bafc52143a010"} Dec 11 11:00:24 crc kubenswrapper[5016]: I1211 11:00:24.549601 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce","Type":"ContainerStarted","Data":"850edceca31f6707fb5c946cedc1eec7a9f70f310c7d0376fc41bf056e383608"} Dec 11 11:00:26 crc kubenswrapper[5016]: I1211 11:00:26.577784 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" event={"ID":"b1dea00e-68ff-473b-a047-48afed50a533","Type":"ContainerStarted","Data":"ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b"} Dec 11 11:00:26 crc kubenswrapper[5016]: I1211 11:00:26.578732 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:26 crc kubenswrapper[5016]: I1211 11:00:26.582099 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce","Type":"ContainerStarted","Data":"bb584da23702278d771c3a6a6520781424a0beac9ec5a180dc6fec747e60ef72"} Dec 11 11:00:26 crc kubenswrapper[5016]: I1211 11:00:26.611028 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" podStartSLOduration=4.61100499 podStartE2EDuration="4.61100499s" podCreationTimestamp="2025-12-11 11:00:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 11:00:26.605319961 +0000 UTC m=+1543.423879560" watchObservedRunningTime="2025-12-11 11:00:26.61100499 +0000 UTC m=+1543.429564579" Dec 11 11:00:32 crc kubenswrapper[5016]: I1211 
11:00:32.813806 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:32 crc kubenswrapper[5016]: I1211 11:00:32.879793 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-424j8"] Dec 11 11:00:32 crc kubenswrapper[5016]: I1211 11:00:32.880708 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" podUID="44f55261-c4b7-43a0-ad5d-7b84e6338f33" containerName="dnsmasq-dns" containerID="cri-o://b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc" gracePeriod=10 Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.101148 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-8nsnc"] Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.103928 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.138557 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-8nsnc"] Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.172136 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-config\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.172215 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-527rm\" (UniqueName: \"kubernetes.io/projected/18f4fb70-2aaa-471a-9556-b0977ad6ec55-kube-api-access-527rm\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.172283 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.172322 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.172366 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.172389 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: 
\"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.172417 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.274053 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-config\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.274116 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-527rm\" (UniqueName: \"kubernetes.io/projected/18f4fb70-2aaa-471a-9556-b0977ad6ec55-kube-api-access-527rm\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.274185 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.274213 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.274246 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.274262 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.274289 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.275178 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: 
\"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.275808 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-config\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.277343 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.278050 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.278893 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.279626 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/18f4fb70-2aaa-471a-9556-b0977ad6ec55-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.315423 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-527rm\" (UniqueName: \"kubernetes.io/projected/18f4fb70-2aaa-471a-9556-b0977ad6ec55-kube-api-access-527rm\") pod \"dnsmasq-dns-78c64bc9c5-8nsnc\" (UID: \"18f4fb70-2aaa-471a-9556-b0977ad6ec55\") " pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.432020 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.548755 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.583084 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dw8lm\" (UniqueName: \"kubernetes.io/projected/44f55261-c4b7-43a0-ad5d-7b84e6338f33-kube-api-access-dw8lm\") pod \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.583153 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-ovsdbserver-sb\") pod \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.583225 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-dns-swift-storage-0\") pod \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.583256 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-dns-svc\") pod \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.583362 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-ovsdbserver-nb\") pod \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.583598 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-config\") pod \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\" (UID: \"44f55261-c4b7-43a0-ad5d-7b84e6338f33\") " Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.595506 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44f55261-c4b7-43a0-ad5d-7b84e6338f33-kube-api-access-dw8lm" (OuterVolumeSpecName: "kube-api-access-dw8lm") pod "44f55261-c4b7-43a0-ad5d-7b84e6338f33" (UID: "44f55261-c4b7-43a0-ad5d-7b84e6338f33"). InnerVolumeSpecName "kube-api-access-dw8lm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.650438 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "44f55261-c4b7-43a0-ad5d-7b84e6338f33" (UID: "44f55261-c4b7-43a0-ad5d-7b84e6338f33"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.672259 5016 generic.go:334] "Generic (PLEG): container finished" podID="44f55261-c4b7-43a0-ad5d-7b84e6338f33" containerID="b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc" exitCode=0 Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.672302 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" event={"ID":"44f55261-c4b7-43a0-ad5d-7b84e6338f33","Type":"ContainerDied","Data":"b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc"} Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.672346 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" event={"ID":"44f55261-c4b7-43a0-ad5d-7b84e6338f33","Type":"ContainerDied","Data":"adb08b5199271d39e4620c653ef0f9d7aaebe281f53cf8b6edad1c6ce6e30ccb"} Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.672369 5016 scope.go:117] "RemoveContainer" containerID="b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.672411 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-config" (OuterVolumeSpecName: "config") pod "44f55261-c4b7-43a0-ad5d-7b84e6338f33" (UID: "44f55261-c4b7-43a0-ad5d-7b84e6338f33"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.672865 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-424j8" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.680120 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "44f55261-c4b7-43a0-ad5d-7b84e6338f33" (UID: "44f55261-c4b7-43a0-ad5d-7b84e6338f33"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.682453 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "44f55261-c4b7-43a0-ad5d-7b84e6338f33" (UID: "44f55261-c4b7-43a0-ad5d-7b84e6338f33"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.700621 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "44f55261-c4b7-43a0-ad5d-7b84e6338f33" (UID: "44f55261-c4b7-43a0-ad5d-7b84e6338f33"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.702182 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-config\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.702209 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dw8lm\" (UniqueName: \"kubernetes.io/projected/44f55261-c4b7-43a0-ad5d-7b84e6338f33-kube-api-access-dw8lm\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.702228 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.702240 5016 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.702252 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.702264 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/44f55261-c4b7-43a0-ad5d-7b84e6338f33-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.719979 5016 scope.go:117] "RemoveContainer" containerID="548415aa97b85dbd8dd6c510c36083641f8dff81adf0c1581267dc4679d27281" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.765842 5016 scope.go:117] "RemoveContainer" containerID="b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc" Dec 11 11:00:33 crc kubenswrapper[5016]: E1211 11:00:33.767662 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc\": container with ID starting with b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc not found: ID does not exist" containerID="b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.767712 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc"} err="failed to get container status \"b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc\": rpc error: code = NotFound desc = could not find container \"b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc\": container with ID starting with b3bce5d9b60db0debf55e9bd6f0c7d69ee443172eb782da6688f6e7ab026fcbc not found: ID does not exist" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.767742 5016 scope.go:117] "RemoveContainer" containerID="548415aa97b85dbd8dd6c510c36083641f8dff81adf0c1581267dc4679d27281" Dec 11 11:00:33 crc kubenswrapper[5016]: E1211 11:00:33.768415 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"548415aa97b85dbd8dd6c510c36083641f8dff81adf0c1581267dc4679d27281\": container with ID starting with 
548415aa97b85dbd8dd6c510c36083641f8dff81adf0c1581267dc4679d27281 not found: ID does not exist" containerID="548415aa97b85dbd8dd6c510c36083641f8dff81adf0c1581267dc4679d27281" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.768452 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"548415aa97b85dbd8dd6c510c36083641f8dff81adf0c1581267dc4679d27281"} err="failed to get container status \"548415aa97b85dbd8dd6c510c36083641f8dff81adf0c1581267dc4679d27281\": rpc error: code = NotFound desc = could not find container \"548415aa97b85dbd8dd6c510c36083641f8dff81adf0c1581267dc4679d27281\": container with ID starting with 548415aa97b85dbd8dd6c510c36083641f8dff81adf0c1581267dc4679d27281 not found: ID does not exist" Dec 11 11:00:33 crc kubenswrapper[5016]: I1211 11:00:33.964780 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-8nsnc"] Dec 11 11:00:33 crc kubenswrapper[5016]: W1211 11:00:33.970395 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18f4fb70_2aaa_471a_9556_b0977ad6ec55.slice/crio-cc70a9f1fa3f523e01cf6d8372d2c6fadadafade3cd2944750ef522d7e64cfae WatchSource:0}: Error finding container cc70a9f1fa3f523e01cf6d8372d2c6fadadafade3cd2944750ef522d7e64cfae: Status 404 returned error can't find the container with id cc70a9f1fa3f523e01cf6d8372d2c6fadadafade3cd2944750ef522d7e64cfae Dec 11 11:00:34 crc kubenswrapper[5016]: I1211 11:00:34.212414 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-424j8"] Dec 11 11:00:34 crc kubenswrapper[5016]: I1211 11:00:34.224558 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-424j8"] Dec 11 11:00:34 crc kubenswrapper[5016]: I1211 11:00:34.683115 5016 generic.go:334] "Generic (PLEG): container finished" podID="18f4fb70-2aaa-471a-9556-b0977ad6ec55" containerID="272950e286c67d833cb0f68ad323c3cd188334fde3e428b252659a7fa8b9c190" exitCode=0 Dec 11 11:00:34 crc kubenswrapper[5016]: I1211 11:00:34.683312 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" event={"ID":"18f4fb70-2aaa-471a-9556-b0977ad6ec55","Type":"ContainerDied","Data":"272950e286c67d833cb0f68ad323c3cd188334fde3e428b252659a7fa8b9c190"} Dec 11 11:00:34 crc kubenswrapper[5016]: I1211 11:00:34.683534 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" event={"ID":"18f4fb70-2aaa-471a-9556-b0977ad6ec55","Type":"ContainerStarted","Data":"cc70a9f1fa3f523e01cf6d8372d2c6fadadafade3cd2944750ef522d7e64cfae"} Dec 11 11:00:35 crc kubenswrapper[5016]: I1211 11:00:35.486392 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44f55261-c4b7-43a0-ad5d-7b84e6338f33" path="/var/lib/kubelet/pods/44f55261-c4b7-43a0-ad5d-7b84e6338f33/volumes" Dec 11 11:00:35 crc kubenswrapper[5016]: I1211 11:00:35.700169 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" event={"ID":"18f4fb70-2aaa-471a-9556-b0977ad6ec55","Type":"ContainerStarted","Data":"81b472245714a3022f3319739530166467c12ddac38c2a4d4023024ed4abb02b"} Dec 11 11:00:35 crc kubenswrapper[5016]: I1211 11:00:35.700704 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:35 crc kubenswrapper[5016]: I1211 11:00:35.729773 5016 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" podStartSLOduration=2.729750541 podStartE2EDuration="2.729750541s" podCreationTimestamp="2025-12-11 11:00:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 11:00:35.723110818 +0000 UTC m=+1552.541670407" watchObservedRunningTime="2025-12-11 11:00:35.729750541 +0000 UTC m=+1552.548310120" Dec 11 11:00:43 crc kubenswrapper[5016]: I1211 11:00:43.435235 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78c64bc9c5-8nsnc" Dec 11 11:00:43 crc kubenswrapper[5016]: I1211 11:00:43.528020 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-2rt6f"] Dec 11 11:00:43 crc kubenswrapper[5016]: I1211 11:00:43.528642 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" podUID="b1dea00e-68ff-473b-a047-48afed50a533" containerName="dnsmasq-dns" containerID="cri-o://ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b" gracePeriod=10 Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.532607 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.572604 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-config\") pod \"b1dea00e-68ff-473b-a047-48afed50a533\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.572677 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-openstack-edpm-ipam\") pod \"b1dea00e-68ff-473b-a047-48afed50a533\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.572780 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-dns-swift-storage-0\") pod \"b1dea00e-68ff-473b-a047-48afed50a533\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.572890 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-dns-svc\") pod \"b1dea00e-68ff-473b-a047-48afed50a533\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.572980 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7q26\" (UniqueName: \"kubernetes.io/projected/b1dea00e-68ff-473b-a047-48afed50a533-kube-api-access-z7q26\") pod \"b1dea00e-68ff-473b-a047-48afed50a533\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.573095 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-ovsdbserver-nb\") pod \"b1dea00e-68ff-473b-a047-48afed50a533\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.573277 5016 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-ovsdbserver-sb\") pod \"b1dea00e-68ff-473b-a047-48afed50a533\" (UID: \"b1dea00e-68ff-473b-a047-48afed50a533\") " Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.581795 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1dea00e-68ff-473b-a047-48afed50a533-kube-api-access-z7q26" (OuterVolumeSpecName: "kube-api-access-z7q26") pod "b1dea00e-68ff-473b-a047-48afed50a533" (UID: "b1dea00e-68ff-473b-a047-48afed50a533"). InnerVolumeSpecName "kube-api-access-z7q26". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.671059 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b1dea00e-68ff-473b-a047-48afed50a533" (UID: "b1dea00e-68ff-473b-a047-48afed50a533"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.673799 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "b1dea00e-68ff-473b-a047-48afed50a533" (UID: "b1dea00e-68ff-473b-a047-48afed50a533"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.678297 5016 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.678323 5016 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.678338 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7q26\" (UniqueName: \"kubernetes.io/projected/b1dea00e-68ff-473b-a047-48afed50a533-kube-api-access-z7q26\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.682011 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b1dea00e-68ff-473b-a047-48afed50a533" (UID: "b1dea00e-68ff-473b-a047-48afed50a533"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.692753 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b1dea00e-68ff-473b-a047-48afed50a533" (UID: "b1dea00e-68ff-473b-a047-48afed50a533"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.721832 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b1dea00e-68ff-473b-a047-48afed50a533" (UID: "b1dea00e-68ff-473b-a047-48afed50a533"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.753148 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-config" (OuterVolumeSpecName: "config") pod "b1dea00e-68ff-473b-a047-48afed50a533" (UID: "b1dea00e-68ff-473b-a047-48afed50a533"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.782010 5016 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.782063 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.782082 5016 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.782096 5016 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1dea00e-68ff-473b-a047-48afed50a533-config\") on node \"crc\" DevicePath \"\"" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.813073 5016 generic.go:334] "Generic (PLEG): container finished" podID="b1dea00e-68ff-473b-a047-48afed50a533" containerID="ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b" exitCode=0 Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.813127 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" event={"ID":"b1dea00e-68ff-473b-a047-48afed50a533","Type":"ContainerDied","Data":"ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b"} Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.813177 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" event={"ID":"b1dea00e-68ff-473b-a047-48afed50a533","Type":"ContainerDied","Data":"bc06dcfd9c8ffe6804b8a43306b11cdb1b34dfe8b2bd2bcff189f6c307771485"} Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.813204 5016 scope.go:117] "RemoveContainer" containerID="ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.813410 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-2rt6f" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.884450 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-2rt6f"] Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.889586 5016 scope.go:117] "RemoveContainer" containerID="c813ca13044229140a533b466b65c6e4cbc98fec020a5747cd873cd05d4aa0c0" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.897196 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-2rt6f"] Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.912827 5016 scope.go:117] "RemoveContainer" containerID="ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b" Dec 11 11:00:45 crc kubenswrapper[5016]: E1211 11:00:44.916332 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b\": container with ID starting with ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b not found: ID does not exist" containerID="ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.916421 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b"} err="failed to get container status \"ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b\": rpc error: code = NotFound desc = could not find container \"ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b\": container with ID starting with ea60756bcea8f85e0ae1a7c03cb2abc0babbe23c16d2b733cffa370fb8ada87b not found: ID does not exist" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.916472 5016 scope.go:117] "RemoveContainer" containerID="c813ca13044229140a533b466b65c6e4cbc98fec020a5747cd873cd05d4aa0c0" Dec 11 11:00:45 crc kubenswrapper[5016]: E1211 11:00:44.917698 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c813ca13044229140a533b466b65c6e4cbc98fec020a5747cd873cd05d4aa0c0\": container with ID starting with c813ca13044229140a533b466b65c6e4cbc98fec020a5747cd873cd05d4aa0c0 not found: ID does not exist" containerID="c813ca13044229140a533b466b65c6e4cbc98fec020a5747cd873cd05d4aa0c0" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:44.917729 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c813ca13044229140a533b466b65c6e4cbc98fec020a5747cd873cd05d4aa0c0"} err="failed to get container status \"c813ca13044229140a533b466b65c6e4cbc98fec020a5747cd873cd05d4aa0c0\": rpc error: code = NotFound desc = could not find container \"c813ca13044229140a533b466b65c6e4cbc98fec020a5747cd873cd05d4aa0c0\": container with ID starting with c813ca13044229140a533b466b65c6e4cbc98fec020a5747cd873cd05d4aa0c0 not found: ID does not exist" Dec 11 11:00:45 crc kubenswrapper[5016]: I1211 11:00:45.487458 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1dea00e-68ff-473b-a047-48afed50a533" path="/var/lib/kubelet/pods/b1dea00e-68ff-473b-a047-48afed50a533/volumes" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.783915 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl"] Dec 11 11:00:56 crc kubenswrapper[5016]: E1211 11:00:56.784876 5016 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1dea00e-68ff-473b-a047-48afed50a533" containerName="dnsmasq-dns" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.784890 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1dea00e-68ff-473b-a047-48afed50a533" containerName="dnsmasq-dns" Dec 11 11:00:56 crc kubenswrapper[5016]: E1211 11:00:56.784911 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44f55261-c4b7-43a0-ad5d-7b84e6338f33" containerName="init" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.784918 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="44f55261-c4b7-43a0-ad5d-7b84e6338f33" containerName="init" Dec 11 11:00:56 crc kubenswrapper[5016]: E1211 11:00:56.784958 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44f55261-c4b7-43a0-ad5d-7b84e6338f33" containerName="dnsmasq-dns" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.784967 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="44f55261-c4b7-43a0-ad5d-7b84e6338f33" containerName="dnsmasq-dns" Dec 11 11:00:56 crc kubenswrapper[5016]: E1211 11:00:56.784986 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1dea00e-68ff-473b-a047-48afed50a533" containerName="init" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.784994 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1dea00e-68ff-473b-a047-48afed50a533" containerName="init" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.785210 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1dea00e-68ff-473b-a047-48afed50a533" containerName="dnsmasq-dns" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.785242 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="44f55261-c4b7-43a0-ad5d-7b84e6338f33" containerName="dnsmasq-dns" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.785924 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.793824 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.794257 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.794441 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.796185 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.800451 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl"] Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.865115 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.865268 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.865345 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpzn4\" (UniqueName: \"kubernetes.io/projected/b8f46431-27eb-4bb3-952a-3dd405e15121-kube-api-access-kpzn4\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.865378 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.939811 5016 generic.go:334] "Generic (PLEG): container finished" podID="86d18250-4387-46f7-af2c-2ce21bf43e12" containerID="3d1f76a753b05e3e46a4d198024aa587761432e28e6ba208366bafc52143a010" exitCode=0 Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.939864 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"86d18250-4387-46f7-af2c-2ce21bf43e12","Type":"ContainerDied","Data":"3d1f76a753b05e3e46a4d198024aa587761432e28e6ba208366bafc52143a010"} Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.966544 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.967086 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.967214 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpzn4\" (UniqueName: \"kubernetes.io/projected/b8f46431-27eb-4bb3-952a-3dd405e15121-kube-api-access-kpzn4\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.967688 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.970842 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.971420 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.971686 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:56 crc kubenswrapper[5016]: I1211 11:00:56.989465 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpzn4\" (UniqueName: \"kubernetes.io/projected/b8f46431-27eb-4bb3-952a-3dd405e15121-kube-api-access-kpzn4\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:57 crc kubenswrapper[5016]: I1211 11:00:57.105742 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:00:57 crc kubenswrapper[5016]: W1211 11:00:57.717636 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8f46431_27eb_4bb3_952a_3dd405e15121.slice/crio-ec5f41c5087228449639f8f126fb3a092401b0c3f661121726807a5da8d6ccf6 WatchSource:0}: Error finding container ec5f41c5087228449639f8f126fb3a092401b0c3f661121726807a5da8d6ccf6: Status 404 returned error can't find the container with id ec5f41c5087228449639f8f126fb3a092401b0c3f661121726807a5da8d6ccf6 Dec 11 11:00:57 crc kubenswrapper[5016]: I1211 11:00:57.721489 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl"] Dec 11 11:00:57 crc kubenswrapper[5016]: I1211 11:00:57.958201 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" event={"ID":"b8f46431-27eb-4bb3-952a-3dd405e15121","Type":"ContainerStarted","Data":"ec5f41c5087228449639f8f126fb3a092401b0c3f661121726807a5da8d6ccf6"} Dec 11 11:00:57 crc kubenswrapper[5016]: I1211 11:00:57.965657 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"86d18250-4387-46f7-af2c-2ce21bf43e12","Type":"ContainerStarted","Data":"75cec1889da5e200911a14617a07f2affd44a89f26a165bff5b61c06710d1cad"} Dec 11 11:00:57 crc kubenswrapper[5016]: I1211 11:00:57.965953 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 11 11:00:57 crc kubenswrapper[5016]: I1211 11:00:57.990161 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.99014163 podStartE2EDuration="37.99014163s" podCreationTimestamp="2025-12-11 11:00:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 11:00:57.986800608 +0000 UTC m=+1574.805360187" watchObservedRunningTime="2025-12-11 11:00:57.99014163 +0000 UTC m=+1574.808701209" Dec 11 11:00:58 crc kubenswrapper[5016]: I1211 11:00:58.977902 5016 generic.go:334] "Generic (PLEG): container finished" podID="24d5919d-ee3d-4023-9a6b-bc1d9838b2ce" containerID="bb584da23702278d771c3a6a6520781424a0beac9ec5a180dc6fec747e60ef72" exitCode=0 Dec 11 11:00:58 crc kubenswrapper[5016]: I1211 11:00:58.978004 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce","Type":"ContainerDied","Data":"bb584da23702278d771c3a6a6520781424a0beac9ec5a180dc6fec747e60ef72"} Dec 11 11:00:59 crc kubenswrapper[5016]: I1211 11:00:59.990481 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"24d5919d-ee3d-4023-9a6b-bc1d9838b2ce","Type":"ContainerStarted","Data":"b68a56e611fd4d19f5e5006ba937d79ade2ee627309fbb2715a804798507f5e1"} Dec 11 11:00:59 crc kubenswrapper[5016]: I1211 11:00:59.991288 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.018297 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.018276112 podStartE2EDuration="38.018276112s" podCreationTimestamp="2025-12-11 11:00:22 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 11:01:00.017053732 +0000 UTC m=+1576.835613321" watchObservedRunningTime="2025-12-11 11:01:00.018276112 +0000 UTC m=+1576.836835691" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.142777 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29424181-nmfs5"] Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.145255 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.157820 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29424181-nmfs5"] Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.245299 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-fernet-keys\") pod \"keystone-cron-29424181-nmfs5\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.245383 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-combined-ca-bundle\") pod \"keystone-cron-29424181-nmfs5\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.245427 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-config-data\") pod \"keystone-cron-29424181-nmfs5\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.245555 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mn9b6\" (UniqueName: \"kubernetes.io/projected/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-kube-api-access-mn9b6\") pod \"keystone-cron-29424181-nmfs5\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.347615 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-fernet-keys\") pod \"keystone-cron-29424181-nmfs5\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.347735 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-combined-ca-bundle\") pod \"keystone-cron-29424181-nmfs5\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.347768 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-config-data\") pod \"keystone-cron-29424181-nmfs5\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " 
pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.347852 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mn9b6\" (UniqueName: \"kubernetes.io/projected/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-kube-api-access-mn9b6\") pod \"keystone-cron-29424181-nmfs5\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.354338 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-combined-ca-bundle\") pod \"keystone-cron-29424181-nmfs5\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.354723 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-config-data\") pod \"keystone-cron-29424181-nmfs5\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.361249 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-fernet-keys\") pod \"keystone-cron-29424181-nmfs5\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.366916 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mn9b6\" (UniqueName: \"kubernetes.io/projected/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-kube-api-access-mn9b6\") pod \"keystone-cron-29424181-nmfs5\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:00 crc kubenswrapper[5016]: I1211 11:01:00.472451 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:01 crc kubenswrapper[5016]: I1211 11:01:01.054832 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29424181-nmfs5"] Dec 11 11:01:02 crc kubenswrapper[5016]: I1211 11:01:02.021956 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29424181-nmfs5" event={"ID":"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2","Type":"ContainerStarted","Data":"a60a3f721585817fea24a1edb36e5333571bb7547f009fa563c6a7399be777c5"} Dec 11 11:01:02 crc kubenswrapper[5016]: I1211 11:01:02.022525 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29424181-nmfs5" event={"ID":"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2","Type":"ContainerStarted","Data":"35989b5e0bc4136abfe12f96f2e40225bed78dd5da7a48648359ae01e16a4894"} Dec 11 11:01:02 crc kubenswrapper[5016]: I1211 11:01:02.046730 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29424181-nmfs5" podStartSLOduration=2.046705131 podStartE2EDuration="2.046705131s" podCreationTimestamp="2025-12-11 11:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 11:01:02.042478708 +0000 UTC m=+1578.861038307" watchObservedRunningTime="2025-12-11 11:01:02.046705131 +0000 UTC m=+1578.865264710" Dec 11 11:01:07 crc kubenswrapper[5016]: I1211 11:01:07.088365 5016 generic.go:334] "Generic (PLEG): container finished" podID="59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2" containerID="a60a3f721585817fea24a1edb36e5333571bb7547f009fa563c6a7399be777c5" exitCode=0 Dec 11 11:01:07 crc kubenswrapper[5016]: I1211 11:01:07.089046 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29424181-nmfs5" event={"ID":"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2","Type":"ContainerDied","Data":"a60a3f721585817fea24a1edb36e5333571bb7547f009fa563c6a7399be777c5"} Dec 11 11:01:09 crc kubenswrapper[5016]: I1211 11:01:09.882163 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.005494 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-fernet-keys\") pod \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.005607 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-config-data\") pod \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.005773 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-combined-ca-bundle\") pod \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.006235 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mn9b6\" (UniqueName: \"kubernetes.io/projected/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-kube-api-access-mn9b6\") pod \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\" (UID: \"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2\") " Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.012194 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-kube-api-access-mn9b6" (OuterVolumeSpecName: "kube-api-access-mn9b6") pod "59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2" (UID: "59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2"). InnerVolumeSpecName "kube-api-access-mn9b6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.012713 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2" (UID: "59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.037673 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2" (UID: "59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.066714 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-config-data" (OuterVolumeSpecName: "config-data") pod "59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2" (UID: "59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.108755 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mn9b6\" (UniqueName: \"kubernetes.io/projected/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-kube-api-access-mn9b6\") on node \"crc\" DevicePath \"\"" Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.108807 5016 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.108823 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.108837 5016 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.145504 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" event={"ID":"b8f46431-27eb-4bb3-952a-3dd405e15121","Type":"ContainerStarted","Data":"ed4dcc389437a6bd35eedc340e5fd19052b1487785f45ab389b9c88e8ffebcf4"} Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.147557 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29424181-nmfs5" event={"ID":"59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2","Type":"ContainerDied","Data":"35989b5e0bc4136abfe12f96f2e40225bed78dd5da7a48648359ae01e16a4894"} Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.147608 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35989b5e0bc4136abfe12f96f2e40225bed78dd5da7a48648359ae01e16a4894" Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.147621 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29424181-nmfs5" Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.184173 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" podStartSLOduration=2.17948289 podStartE2EDuration="14.184144577s" podCreationTimestamp="2025-12-11 11:00:56 +0000 UTC" firstStartedPulling="2025-12-11 11:00:57.720915264 +0000 UTC m=+1574.539474843" lastFinishedPulling="2025-12-11 11:01:09.725576941 +0000 UTC m=+1586.544136530" observedRunningTime="2025-12-11 11:01:10.168087823 +0000 UTC m=+1586.986647452" watchObservedRunningTime="2025-12-11 11:01:10.184144577 +0000 UTC m=+1587.002704176" Dec 11 11:01:10 crc kubenswrapper[5016]: I1211 11:01:10.912399 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="86d18250-4387-46f7-af2c-2ce21bf43e12" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.204:5671: connect: connection refused" Dec 11 11:01:13 crc kubenswrapper[5016]: I1211 11:01:13.072258 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 11 11:01:20 crc kubenswrapper[5016]: I1211 11:01:20.911295 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 11 11:01:23 crc kubenswrapper[5016]: I1211 11:01:23.275770 5016 generic.go:334] "Generic (PLEG): container finished" podID="b8f46431-27eb-4bb3-952a-3dd405e15121" containerID="ed4dcc389437a6bd35eedc340e5fd19052b1487785f45ab389b9c88e8ffebcf4" exitCode=0 Dec 11 11:01:23 crc kubenswrapper[5016]: I1211 11:01:23.275851 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" event={"ID":"b8f46431-27eb-4bb3-952a-3dd405e15121","Type":"ContainerDied","Data":"ed4dcc389437a6bd35eedc340e5fd19052b1487785f45ab389b9c88e8ffebcf4"} Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.751336 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.849692 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpzn4\" (UniqueName: \"kubernetes.io/projected/b8f46431-27eb-4bb3-952a-3dd405e15121-kube-api-access-kpzn4\") pod \"b8f46431-27eb-4bb3-952a-3dd405e15121\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.849771 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-inventory\") pod \"b8f46431-27eb-4bb3-952a-3dd405e15121\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.849896 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-ssh-key\") pod \"b8f46431-27eb-4bb3-952a-3dd405e15121\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.850038 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-repo-setup-combined-ca-bundle\") pod \"b8f46431-27eb-4bb3-952a-3dd405e15121\" (UID: \"b8f46431-27eb-4bb3-952a-3dd405e15121\") " Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.856133 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8f46431-27eb-4bb3-952a-3dd405e15121-kube-api-access-kpzn4" (OuterVolumeSpecName: "kube-api-access-kpzn4") pod "b8f46431-27eb-4bb3-952a-3dd405e15121" (UID: "b8f46431-27eb-4bb3-952a-3dd405e15121"). InnerVolumeSpecName "kube-api-access-kpzn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.857057 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "b8f46431-27eb-4bb3-952a-3dd405e15121" (UID: "b8f46431-27eb-4bb3-952a-3dd405e15121"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.882325 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b8f46431-27eb-4bb3-952a-3dd405e15121" (UID: "b8f46431-27eb-4bb3-952a-3dd405e15121"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.886561 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-inventory" (OuterVolumeSpecName: "inventory") pod "b8f46431-27eb-4bb3-952a-3dd405e15121" (UID: "b8f46431-27eb-4bb3-952a-3dd405e15121"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.952138 5016 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.952182 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpzn4\" (UniqueName: \"kubernetes.io/projected/b8f46431-27eb-4bb3-952a-3dd405e15121-kube-api-access-kpzn4\") on node \"crc\" DevicePath \"\"" Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.952193 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:01:24 crc kubenswrapper[5016]: I1211 11:01:24.952202 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b8f46431-27eb-4bb3-952a-3dd405e15121-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.323203 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" event={"ID":"b8f46431-27eb-4bb3-952a-3dd405e15121","Type":"ContainerDied","Data":"ec5f41c5087228449639f8f126fb3a092401b0c3f661121726807a5da8d6ccf6"} Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.323735 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec5f41c5087228449639f8f126fb3a092401b0c3f661121726807a5da8d6ccf6" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.323329 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.388965 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb"] Dec 11 11:01:25 crc kubenswrapper[5016]: E1211 11:01:25.389387 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8f46431-27eb-4bb3-952a-3dd405e15121" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.389402 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8f46431-27eb-4bb3-952a-3dd405e15121" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 11 11:01:25 crc kubenswrapper[5016]: E1211 11:01:25.389418 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2" containerName="keystone-cron" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.389426 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2" containerName="keystone-cron" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.389678 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2" containerName="keystone-cron" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.389708 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8f46431-27eb-4bb3-952a-3dd405e15121" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.390448 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.393724 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.394626 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.396585 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.396623 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.409200 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb"] Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.463441 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e240bd3c-2bc0-4e00-b092-51ab30da277d-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-26jgb\" (UID: \"e240bd3c-2bc0-4e00-b092-51ab30da277d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.463777 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqfh4\" (UniqueName: \"kubernetes.io/projected/e240bd3c-2bc0-4e00-b092-51ab30da277d-kube-api-access-hqfh4\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-26jgb\" (UID: \"e240bd3c-2bc0-4e00-b092-51ab30da277d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.463895 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e240bd3c-2bc0-4e00-b092-51ab30da277d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-26jgb\" (UID: \"e240bd3c-2bc0-4e00-b092-51ab30da277d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.566397 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e240bd3c-2bc0-4e00-b092-51ab30da277d-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-26jgb\" (UID: \"e240bd3c-2bc0-4e00-b092-51ab30da277d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.566691 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqfh4\" (UniqueName: \"kubernetes.io/projected/e240bd3c-2bc0-4e00-b092-51ab30da277d-kube-api-access-hqfh4\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-26jgb\" (UID: \"e240bd3c-2bc0-4e00-b092-51ab30da277d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.567522 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e240bd3c-2bc0-4e00-b092-51ab30da277d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-26jgb\" (UID: \"e240bd3c-2bc0-4e00-b092-51ab30da277d\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.576873 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e240bd3c-2bc0-4e00-b092-51ab30da277d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-26jgb\" (UID: \"e240bd3c-2bc0-4e00-b092-51ab30da277d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.576967 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e240bd3c-2bc0-4e00-b092-51ab30da277d-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-26jgb\" (UID: \"e240bd3c-2bc0-4e00-b092-51ab30da277d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.585799 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqfh4\" (UniqueName: \"kubernetes.io/projected/e240bd3c-2bc0-4e00-b092-51ab30da277d-kube-api-access-hqfh4\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-26jgb\" (UID: \"e240bd3c-2bc0-4e00-b092-51ab30da277d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:25 crc kubenswrapper[5016]: I1211 11:01:25.710135 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:26 crc kubenswrapper[5016]: I1211 11:01:26.146793 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb"] Dec 11 11:01:26 crc kubenswrapper[5016]: I1211 11:01:26.335222 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" event={"ID":"e240bd3c-2bc0-4e00-b092-51ab30da277d","Type":"ContainerStarted","Data":"3e7182acf22aabe363c59151ed3f2ac9fda34ad426fe83c9654b89f1a115c0dd"} Dec 11 11:01:27 crc kubenswrapper[5016]: I1211 11:01:27.349922 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" event={"ID":"e240bd3c-2bc0-4e00-b092-51ab30da277d","Type":"ContainerStarted","Data":"89997bf72773a6f70da2575615e69cc4fc9b714f48773c5161cf92fb7bed1a79"} Dec 11 11:01:27 crc kubenswrapper[5016]: I1211 11:01:27.372476 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" podStartSLOduration=1.822419918 podStartE2EDuration="2.372446704s" podCreationTimestamp="2025-12-11 11:01:25 +0000 UTC" firstStartedPulling="2025-12-11 11:01:26.14377495 +0000 UTC m=+1602.962334529" lastFinishedPulling="2025-12-11 11:01:26.693801716 +0000 UTC m=+1603.512361315" observedRunningTime="2025-12-11 11:01:27.366666362 +0000 UTC m=+1604.185225971" watchObservedRunningTime="2025-12-11 11:01:27.372446704 +0000 UTC m=+1604.191006283" Dec 11 11:01:30 crc kubenswrapper[5016]: I1211 11:01:30.387432 5016 generic.go:334] "Generic (PLEG): container finished" podID="e240bd3c-2bc0-4e00-b092-51ab30da277d" containerID="89997bf72773a6f70da2575615e69cc4fc9b714f48773c5161cf92fb7bed1a79" exitCode=0 Dec 11 11:01:30 crc kubenswrapper[5016]: I1211 11:01:30.387532 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" 
event={"ID":"e240bd3c-2bc0-4e00-b092-51ab30da277d","Type":"ContainerDied","Data":"89997bf72773a6f70da2575615e69cc4fc9b714f48773c5161cf92fb7bed1a79"} Dec 11 11:01:31 crc kubenswrapper[5016]: I1211 11:01:31.908767 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.036406 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e240bd3c-2bc0-4e00-b092-51ab30da277d-inventory\") pod \"e240bd3c-2bc0-4e00-b092-51ab30da277d\" (UID: \"e240bd3c-2bc0-4e00-b092-51ab30da277d\") " Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.037014 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqfh4\" (UniqueName: \"kubernetes.io/projected/e240bd3c-2bc0-4e00-b092-51ab30da277d-kube-api-access-hqfh4\") pod \"e240bd3c-2bc0-4e00-b092-51ab30da277d\" (UID: \"e240bd3c-2bc0-4e00-b092-51ab30da277d\") " Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.037229 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e240bd3c-2bc0-4e00-b092-51ab30da277d-ssh-key\") pod \"e240bd3c-2bc0-4e00-b092-51ab30da277d\" (UID: \"e240bd3c-2bc0-4e00-b092-51ab30da277d\") " Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.045049 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e240bd3c-2bc0-4e00-b092-51ab30da277d-kube-api-access-hqfh4" (OuterVolumeSpecName: "kube-api-access-hqfh4") pod "e240bd3c-2bc0-4e00-b092-51ab30da277d" (UID: "e240bd3c-2bc0-4e00-b092-51ab30da277d"). InnerVolumeSpecName "kube-api-access-hqfh4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.071528 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e240bd3c-2bc0-4e00-b092-51ab30da277d-inventory" (OuterVolumeSpecName: "inventory") pod "e240bd3c-2bc0-4e00-b092-51ab30da277d" (UID: "e240bd3c-2bc0-4e00-b092-51ab30da277d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.077345 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e240bd3c-2bc0-4e00-b092-51ab30da277d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e240bd3c-2bc0-4e00-b092-51ab30da277d" (UID: "e240bd3c-2bc0-4e00-b092-51ab30da277d"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.139860 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e240bd3c-2bc0-4e00-b092-51ab30da277d-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.139903 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqfh4\" (UniqueName: \"kubernetes.io/projected/e240bd3c-2bc0-4e00-b092-51ab30da277d-kube-api-access-hqfh4\") on node \"crc\" DevicePath \"\"" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.139919 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e240bd3c-2bc0-4e00-b092-51ab30da277d-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.414446 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" event={"ID":"e240bd3c-2bc0-4e00-b092-51ab30da277d","Type":"ContainerDied","Data":"3e7182acf22aabe363c59151ed3f2ac9fda34ad426fe83c9654b89f1a115c0dd"} Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.414520 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e7182acf22aabe363c59151ed3f2ac9fda34ad426fe83c9654b89f1a115c0dd" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.414691 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-26jgb" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.516556 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b"] Dec 11 11:01:32 crc kubenswrapper[5016]: E1211 11:01:32.517493 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e240bd3c-2bc0-4e00-b092-51ab30da277d" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.517651 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e240bd3c-2bc0-4e00-b092-51ab30da277d" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.518074 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="e240bd3c-2bc0-4e00-b092-51ab30da277d" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.519790 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.524709 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.526074 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.526084 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.526207 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.533282 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b"] Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.658522 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.658654 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.658677 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.658696 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cznj6\" (UniqueName: \"kubernetes.io/projected/10792fe7-d5d5-4918-8658-20331647f302-kube-api-access-cznj6\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.761614 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.762090 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-bootstrap-combined-ca-bundle\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.762166 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.762232 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cznj6\" (UniqueName: \"kubernetes.io/projected/10792fe7-d5d5-4918-8658-20331647f302-kube-api-access-cznj6\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.771110 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.771329 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.780988 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.787588 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cznj6\" (UniqueName: \"kubernetes.io/projected/10792fe7-d5d5-4918-8658-20331647f302-kube-api-access-cznj6\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:32 crc kubenswrapper[5016]: I1211 11:01:32.854095 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:01:33 crc kubenswrapper[5016]: I1211 11:01:33.431393 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b"] Dec 11 11:01:34 crc kubenswrapper[5016]: I1211 11:01:34.442087 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" event={"ID":"10792fe7-d5d5-4918-8658-20331647f302","Type":"ContainerStarted","Data":"0b68e1279c8850cd104b4c0010e9a597ccfc752d981bbb1baf3798137c5dd367"} Dec 11 11:01:35 crc kubenswrapper[5016]: I1211 11:01:35.451398 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" event={"ID":"10792fe7-d5d5-4918-8658-20331647f302","Type":"ContainerStarted","Data":"9efb1f94d48be5529e468ef34f04cb07b9df1d1697619f25e2626c73a1f15dc8"} Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.009016 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" podStartSLOduration=22.637698451 podStartE2EDuration="24.008994339s" podCreationTimestamp="2025-12-11 11:01:32 +0000 UTC" firstStartedPulling="2025-12-11 11:01:33.435004635 +0000 UTC m=+1610.253564214" lastFinishedPulling="2025-12-11 11:01:34.806300523 +0000 UTC m=+1611.624860102" observedRunningTime="2025-12-11 11:01:35.475618572 +0000 UTC m=+1612.294178161" watchObservedRunningTime="2025-12-11 11:01:56.008994339 +0000 UTC m=+1632.827553918" Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.012659 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c8rmb"] Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.015068 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.033589 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c8rmb"] Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.151205 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq85n\" (UniqueName: \"kubernetes.io/projected/dbe0685f-3f66-4844-a100-be9a70af2bb7-kube-api-access-tq85n\") pod \"certified-operators-c8rmb\" (UID: \"dbe0685f-3f66-4844-a100-be9a70af2bb7\") " pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.151365 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe0685f-3f66-4844-a100-be9a70af2bb7-catalog-content\") pod \"certified-operators-c8rmb\" (UID: \"dbe0685f-3f66-4844-a100-be9a70af2bb7\") " pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.151411 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe0685f-3f66-4844-a100-be9a70af2bb7-utilities\") pod \"certified-operators-c8rmb\" (UID: \"dbe0685f-3f66-4844-a100-be9a70af2bb7\") " pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.252980 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq85n\" (UniqueName: \"kubernetes.io/projected/dbe0685f-3f66-4844-a100-be9a70af2bb7-kube-api-access-tq85n\") pod \"certified-operators-c8rmb\" (UID: \"dbe0685f-3f66-4844-a100-be9a70af2bb7\") " pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.253742 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe0685f-3f66-4844-a100-be9a70af2bb7-catalog-content\") pod \"certified-operators-c8rmb\" (UID: \"dbe0685f-3f66-4844-a100-be9a70af2bb7\") " pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.254354 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe0685f-3f66-4844-a100-be9a70af2bb7-catalog-content\") pod \"certified-operators-c8rmb\" (UID: \"dbe0685f-3f66-4844-a100-be9a70af2bb7\") " pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.254524 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe0685f-3f66-4844-a100-be9a70af2bb7-utilities\") pod \"certified-operators-c8rmb\" (UID: \"dbe0685f-3f66-4844-a100-be9a70af2bb7\") " pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.254886 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe0685f-3f66-4844-a100-be9a70af2bb7-utilities\") pod \"certified-operators-c8rmb\" (UID: \"dbe0685f-3f66-4844-a100-be9a70af2bb7\") " pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.277592 5016 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tq85n\" (UniqueName: \"kubernetes.io/projected/dbe0685f-3f66-4844-a100-be9a70af2bb7-kube-api-access-tq85n\") pod \"certified-operators-c8rmb\" (UID: \"dbe0685f-3f66-4844-a100-be9a70af2bb7\") " pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.347470 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:01:56 crc kubenswrapper[5016]: W1211 11:01:56.871652 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbe0685f_3f66_4844_a100_be9a70af2bb7.slice/crio-7ac7611f149e79067e06031816089ced4e0d9f06c669b70963b9dba5eddc16a5 WatchSource:0}: Error finding container 7ac7611f149e79067e06031816089ced4e0d9f06c669b70963b9dba5eddc16a5: Status 404 returned error can't find the container with id 7ac7611f149e79067e06031816089ced4e0d9f06c669b70963b9dba5eddc16a5 Dec 11 11:01:56 crc kubenswrapper[5016]: I1211 11:01:56.879728 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c8rmb"] Dec 11 11:01:57 crc kubenswrapper[5016]: I1211 11:01:57.704594 5016 generic.go:334] "Generic (PLEG): container finished" podID="dbe0685f-3f66-4844-a100-be9a70af2bb7" containerID="91d9fe660aef116f37dfb9b8f665905a0223e5e78cec0b7dde90695de46db9c3" exitCode=0 Dec 11 11:01:57 crc kubenswrapper[5016]: I1211 11:01:57.704718 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8rmb" event={"ID":"dbe0685f-3f66-4844-a100-be9a70af2bb7","Type":"ContainerDied","Data":"91d9fe660aef116f37dfb9b8f665905a0223e5e78cec0b7dde90695de46db9c3"} Dec 11 11:01:57 crc kubenswrapper[5016]: I1211 11:01:57.705013 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8rmb" event={"ID":"dbe0685f-3f66-4844-a100-be9a70af2bb7","Type":"ContainerStarted","Data":"7ac7611f149e79067e06031816089ced4e0d9f06c669b70963b9dba5eddc16a5"} Dec 11 11:01:59 crc kubenswrapper[5016]: I1211 11:01:59.730127 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8rmb" event={"ID":"dbe0685f-3f66-4844-a100-be9a70af2bb7","Type":"ContainerStarted","Data":"9a475d7ea16ee81335e7d10d73cba278d7eb3087310d516be0609cc4fa9a5b0f"} Dec 11 11:02:01 crc kubenswrapper[5016]: I1211 11:02:01.768336 5016 generic.go:334] "Generic (PLEG): container finished" podID="dbe0685f-3f66-4844-a100-be9a70af2bb7" containerID="9a475d7ea16ee81335e7d10d73cba278d7eb3087310d516be0609cc4fa9a5b0f" exitCode=0 Dec 11 11:02:01 crc kubenswrapper[5016]: I1211 11:02:01.768881 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8rmb" event={"ID":"dbe0685f-3f66-4844-a100-be9a70af2bb7","Type":"ContainerDied","Data":"9a475d7ea16ee81335e7d10d73cba278d7eb3087310d516be0609cc4fa9a5b0f"} Dec 11 11:02:04 crc kubenswrapper[5016]: I1211 11:02:04.808208 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8rmb" event={"ID":"dbe0685f-3f66-4844-a100-be9a70af2bb7","Type":"ContainerStarted","Data":"a099f2b6d200efc80a8dc3d6bf02753567124070ecec1652ffc2bf85ac4df96e"} Dec 11 11:02:04 crc kubenswrapper[5016]: I1211 11:02:04.832330 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c8rmb" 
podStartSLOduration=3.57092803 podStartE2EDuration="9.832310982s" podCreationTimestamp="2025-12-11 11:01:55 +0000 UTC" firstStartedPulling="2025-12-11 11:01:57.706792017 +0000 UTC m=+1634.525351596" lastFinishedPulling="2025-12-11 11:02:03.968174969 +0000 UTC m=+1640.786734548" observedRunningTime="2025-12-11 11:02:04.830748734 +0000 UTC m=+1641.649308333" watchObservedRunningTime="2025-12-11 11:02:04.832310982 +0000 UTC m=+1641.650870561" Dec 11 11:02:06 crc kubenswrapper[5016]: I1211 11:02:06.349729 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:02:06 crc kubenswrapper[5016]: I1211 11:02:06.350010 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:02:06 crc kubenswrapper[5016]: I1211 11:02:06.398212 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:02:09 crc kubenswrapper[5016]: I1211 11:02:09.737070 5016 scope.go:117] "RemoveContainer" containerID="56ca242e72da8cae9557a994f452d92971afc8299dd507cabe42ead81b910d7f" Dec 11 11:02:12 crc kubenswrapper[5016]: I1211 11:02:12.932583 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:02:12 crc kubenswrapper[5016]: I1211 11:02:12.933231 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:02:16 crc kubenswrapper[5016]: I1211 11:02:16.398550 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:02:16 crc kubenswrapper[5016]: I1211 11:02:16.454796 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c8rmb"] Dec 11 11:02:16 crc kubenswrapper[5016]: I1211 11:02:16.931594 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c8rmb" podUID="dbe0685f-3f66-4844-a100-be9a70af2bb7" containerName="registry-server" containerID="cri-o://a099f2b6d200efc80a8dc3d6bf02753567124070ecec1652ffc2bf85ac4df96e" gracePeriod=2 Dec 11 11:02:17 crc kubenswrapper[5016]: I1211 11:02:17.943161 5016 generic.go:334] "Generic (PLEG): container finished" podID="dbe0685f-3f66-4844-a100-be9a70af2bb7" containerID="a099f2b6d200efc80a8dc3d6bf02753567124070ecec1652ffc2bf85ac4df96e" exitCode=0 Dec 11 11:02:17 crc kubenswrapper[5016]: I1211 11:02:17.943224 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8rmb" event={"ID":"dbe0685f-3f66-4844-a100-be9a70af2bb7","Type":"ContainerDied","Data":"a099f2b6d200efc80a8dc3d6bf02753567124070ecec1652ffc2bf85ac4df96e"} Dec 11 11:02:17 crc kubenswrapper[5016]: I1211 11:02:17.943559 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8rmb" 
event={"ID":"dbe0685f-3f66-4844-a100-be9a70af2bb7","Type":"ContainerDied","Data":"7ac7611f149e79067e06031816089ced4e0d9f06c669b70963b9dba5eddc16a5"} Dec 11 11:02:17 crc kubenswrapper[5016]: I1211 11:02:17.943587 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ac7611f149e79067e06031816089ced4e0d9f06c669b70963b9dba5eddc16a5" Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.018541 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.133427 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe0685f-3f66-4844-a100-be9a70af2bb7-catalog-content\") pod \"dbe0685f-3f66-4844-a100-be9a70af2bb7\" (UID: \"dbe0685f-3f66-4844-a100-be9a70af2bb7\") " Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.133557 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tq85n\" (UniqueName: \"kubernetes.io/projected/dbe0685f-3f66-4844-a100-be9a70af2bb7-kube-api-access-tq85n\") pod \"dbe0685f-3f66-4844-a100-be9a70af2bb7\" (UID: \"dbe0685f-3f66-4844-a100-be9a70af2bb7\") " Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.134364 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe0685f-3f66-4844-a100-be9a70af2bb7-utilities\") pod \"dbe0685f-3f66-4844-a100-be9a70af2bb7\" (UID: \"dbe0685f-3f66-4844-a100-be9a70af2bb7\") " Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.134898 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbe0685f-3f66-4844-a100-be9a70af2bb7-utilities" (OuterVolumeSpecName: "utilities") pod "dbe0685f-3f66-4844-a100-be9a70af2bb7" (UID: "dbe0685f-3f66-4844-a100-be9a70af2bb7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.139275 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbe0685f-3f66-4844-a100-be9a70af2bb7-kube-api-access-tq85n" (OuterVolumeSpecName: "kube-api-access-tq85n") pod "dbe0685f-3f66-4844-a100-be9a70af2bb7" (UID: "dbe0685f-3f66-4844-a100-be9a70af2bb7"). InnerVolumeSpecName "kube-api-access-tq85n". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.180324 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbe0685f-3f66-4844-a100-be9a70af2bb7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dbe0685f-3f66-4844-a100-be9a70af2bb7" (UID: "dbe0685f-3f66-4844-a100-be9a70af2bb7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.236455 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tq85n\" (UniqueName: \"kubernetes.io/projected/dbe0685f-3f66-4844-a100-be9a70af2bb7-kube-api-access-tq85n\") on node \"crc\" DevicePath \"\"" Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.236762 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe0685f-3f66-4844-a100-be9a70af2bb7-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.236843 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe0685f-3f66-4844-a100-be9a70af2bb7-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.951732 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c8rmb" Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.988031 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c8rmb"] Dec 11 11:02:18 crc kubenswrapper[5016]: I1211 11:02:18.996806 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c8rmb"] Dec 11 11:02:19 crc kubenswrapper[5016]: I1211 11:02:19.487813 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbe0685f-3f66-4844-a100-be9a70af2bb7" path="/var/lib/kubelet/pods/dbe0685f-3f66-4844-a100-be9a70af2bb7/volumes" Dec 11 11:02:42 crc kubenswrapper[5016]: I1211 11:02:42.932646 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:02:42 crc kubenswrapper[5016]: I1211 11:02:42.933179 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:03:09 crc kubenswrapper[5016]: I1211 11:03:09.803873 5016 scope.go:117] "RemoveContainer" containerID="c13ef5cb6f5b82c13b0c2c2d0484802be6a440acefd3d7d7b1e9a00981f1453c" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.384218 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9k5c6"] Dec 11 11:03:12 crc kubenswrapper[5016]: E1211 11:03:12.385314 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe0685f-3f66-4844-a100-be9a70af2bb7" containerName="extract-utilities" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.385350 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe0685f-3f66-4844-a100-be9a70af2bb7" containerName="extract-utilities" Dec 11 11:03:12 crc kubenswrapper[5016]: E1211 11:03:12.385395 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe0685f-3f66-4844-a100-be9a70af2bb7" containerName="registry-server" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.385408 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe0685f-3f66-4844-a100-be9a70af2bb7" containerName="registry-server" 
Dec 11 11:03:12 crc kubenswrapper[5016]: E1211 11:03:12.385430 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe0685f-3f66-4844-a100-be9a70af2bb7" containerName="extract-content" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.385463 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe0685f-3f66-4844-a100-be9a70af2bb7" containerName="extract-content" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.385889 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbe0685f-3f66-4844-a100-be9a70af2bb7" containerName="registry-server" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.388477 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.394636 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9k5c6"] Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.535382 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a2e08d4-b882-4805-afac-90f7f2393e5b-catalog-content\") pod \"community-operators-9k5c6\" (UID: \"4a2e08d4-b882-4805-afac-90f7f2393e5b\") " pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.535471 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a2e08d4-b882-4805-afac-90f7f2393e5b-utilities\") pod \"community-operators-9k5c6\" (UID: \"4a2e08d4-b882-4805-afac-90f7f2393e5b\") " pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.536189 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrm5h\" (UniqueName: \"kubernetes.io/projected/4a2e08d4-b882-4805-afac-90f7f2393e5b-kube-api-access-nrm5h\") pod \"community-operators-9k5c6\" (UID: \"4a2e08d4-b882-4805-afac-90f7f2393e5b\") " pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.638875 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrm5h\" (UniqueName: \"kubernetes.io/projected/4a2e08d4-b882-4805-afac-90f7f2393e5b-kube-api-access-nrm5h\") pod \"community-operators-9k5c6\" (UID: \"4a2e08d4-b882-4805-afac-90f7f2393e5b\") " pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.638972 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a2e08d4-b882-4805-afac-90f7f2393e5b-catalog-content\") pod \"community-operators-9k5c6\" (UID: \"4a2e08d4-b882-4805-afac-90f7f2393e5b\") " pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.639029 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a2e08d4-b882-4805-afac-90f7f2393e5b-utilities\") pod \"community-operators-9k5c6\" (UID: \"4a2e08d4-b882-4805-afac-90f7f2393e5b\") " pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.639609 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a2e08d4-b882-4805-afac-90f7f2393e5b-catalog-content\") pod \"community-operators-9k5c6\" (UID: \"4a2e08d4-b882-4805-afac-90f7f2393e5b\") " pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.639620 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a2e08d4-b882-4805-afac-90f7f2393e5b-utilities\") pod \"community-operators-9k5c6\" (UID: \"4a2e08d4-b882-4805-afac-90f7f2393e5b\") " pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.662047 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrm5h\" (UniqueName: \"kubernetes.io/projected/4a2e08d4-b882-4805-afac-90f7f2393e5b-kube-api-access-nrm5h\") pod \"community-operators-9k5c6\" (UID: \"4a2e08d4-b882-4805-afac-90f7f2393e5b\") " pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.720683 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.933209 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.933549 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.933626 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.936411 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 11:03:12 crc kubenswrapper[5016]: I1211 11:03:12.936504 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" gracePeriod=600 Dec 11 11:03:13 crc kubenswrapper[5016]: E1211 11:03:13.185744 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:03:13 crc kubenswrapper[5016]: I1211 
11:03:13.312312 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9k5c6"] Dec 11 11:03:13 crc kubenswrapper[5016]: I1211 11:03:13.527785 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9k5c6" event={"ID":"4a2e08d4-b882-4805-afac-90f7f2393e5b","Type":"ContainerStarted","Data":"f0e1cb438014823d5e1bedd1dc67d5c05fab65322f78ed1589aa41819285d2df"} Dec 11 11:03:13 crc kubenswrapper[5016]: I1211 11:03:13.530423 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" exitCode=0 Dec 11 11:03:13 crc kubenswrapper[5016]: I1211 11:03:13.530457 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756"} Dec 11 11:03:13 crc kubenswrapper[5016]: I1211 11:03:13.530489 5016 scope.go:117] "RemoveContainer" containerID="53da38b3e027c864a9592c4787654311b819c80dc57e5ec065e90c602166ceee" Dec 11 11:03:13 crc kubenswrapper[5016]: I1211 11:03:13.531316 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:03:13 crc kubenswrapper[5016]: E1211 11:03:13.531703 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:03:14 crc kubenswrapper[5016]: I1211 11:03:14.547561 5016 generic.go:334] "Generic (PLEG): container finished" podID="4a2e08d4-b882-4805-afac-90f7f2393e5b" containerID="0869ca5939d5db54248ddb1f4879f876f3f55bfdf105cabc4849892b0bff702b" exitCode=0 Dec 11 11:03:14 crc kubenswrapper[5016]: I1211 11:03:14.547624 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9k5c6" event={"ID":"4a2e08d4-b882-4805-afac-90f7f2393e5b","Type":"ContainerDied","Data":"0869ca5939d5db54248ddb1f4879f876f3f55bfdf105cabc4849892b0bff702b"} Dec 11 11:03:17 crc kubenswrapper[5016]: I1211 11:03:17.576987 5016 generic.go:334] "Generic (PLEG): container finished" podID="4a2e08d4-b882-4805-afac-90f7f2393e5b" containerID="4f25cfccfcdebe327f3f97eda9340c0b21f888fced85dc8bd54c0c84d8ead98e" exitCode=0 Dec 11 11:03:17 crc kubenswrapper[5016]: I1211 11:03:17.577694 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9k5c6" event={"ID":"4a2e08d4-b882-4805-afac-90f7f2393e5b","Type":"ContainerDied","Data":"4f25cfccfcdebe327f3f97eda9340c0b21f888fced85dc8bd54c0c84d8ead98e"} Dec 11 11:03:19 crc kubenswrapper[5016]: I1211 11:03:19.601197 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9k5c6" event={"ID":"4a2e08d4-b882-4805-afac-90f7f2393e5b","Type":"ContainerStarted","Data":"accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59"} Dec 11 11:03:19 crc kubenswrapper[5016]: I1211 11:03:19.639607 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/community-operators-9k5c6" podStartSLOduration=3.783288245 podStartE2EDuration="7.639574459s" podCreationTimestamp="2025-12-11 11:03:12 +0000 UTC" firstStartedPulling="2025-12-11 11:03:14.551301199 +0000 UTC m=+1711.369860778" lastFinishedPulling="2025-12-11 11:03:18.407587413 +0000 UTC m=+1715.226146992" observedRunningTime="2025-12-11 11:03:19.619935127 +0000 UTC m=+1716.438494706" watchObservedRunningTime="2025-12-11 11:03:19.639574459 +0000 UTC m=+1716.458134038" Dec 11 11:03:22 crc kubenswrapper[5016]: I1211 11:03:22.722565 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:22 crc kubenswrapper[5016]: I1211 11:03:22.723197 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:22 crc kubenswrapper[5016]: I1211 11:03:22.772395 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:23 crc kubenswrapper[5016]: I1211 11:03:23.690252 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:23 crc kubenswrapper[5016]: I1211 11:03:23.741851 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9k5c6"] Dec 11 11:03:25 crc kubenswrapper[5016]: I1211 11:03:25.474998 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:03:25 crc kubenswrapper[5016]: E1211 11:03:25.475550 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:03:25 crc kubenswrapper[5016]: I1211 11:03:25.661882 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9k5c6" podUID="4a2e08d4-b882-4805-afac-90f7f2393e5b" containerName="registry-server" containerID="cri-o://accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59" gracePeriod=2 Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.121503 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.246176 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a2e08d4-b882-4805-afac-90f7f2393e5b-utilities\") pod \"4a2e08d4-b882-4805-afac-90f7f2393e5b\" (UID: \"4a2e08d4-b882-4805-afac-90f7f2393e5b\") " Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.246516 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrm5h\" (UniqueName: \"kubernetes.io/projected/4a2e08d4-b882-4805-afac-90f7f2393e5b-kube-api-access-nrm5h\") pod \"4a2e08d4-b882-4805-afac-90f7f2393e5b\" (UID: \"4a2e08d4-b882-4805-afac-90f7f2393e5b\") " Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.246601 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a2e08d4-b882-4805-afac-90f7f2393e5b-catalog-content\") pod \"4a2e08d4-b882-4805-afac-90f7f2393e5b\" (UID: \"4a2e08d4-b882-4805-afac-90f7f2393e5b\") " Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.247782 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a2e08d4-b882-4805-afac-90f7f2393e5b-utilities" (OuterVolumeSpecName: "utilities") pod "4a2e08d4-b882-4805-afac-90f7f2393e5b" (UID: "4a2e08d4-b882-4805-afac-90f7f2393e5b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.256272 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a2e08d4-b882-4805-afac-90f7f2393e5b-kube-api-access-nrm5h" (OuterVolumeSpecName: "kube-api-access-nrm5h") pod "4a2e08d4-b882-4805-afac-90f7f2393e5b" (UID: "4a2e08d4-b882-4805-afac-90f7f2393e5b"). InnerVolumeSpecName "kube-api-access-nrm5h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.303975 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a2e08d4-b882-4805-afac-90f7f2393e5b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a2e08d4-b882-4805-afac-90f7f2393e5b" (UID: "4a2e08d4-b882-4805-afac-90f7f2393e5b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.349698 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrm5h\" (UniqueName: \"kubernetes.io/projected/4a2e08d4-b882-4805-afac-90f7f2393e5b-kube-api-access-nrm5h\") on node \"crc\" DevicePath \"\"" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.349750 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a2e08d4-b882-4805-afac-90f7f2393e5b-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.349761 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a2e08d4-b882-4805-afac-90f7f2393e5b-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.675068 5016 generic.go:334] "Generic (PLEG): container finished" podID="4a2e08d4-b882-4805-afac-90f7f2393e5b" containerID="accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59" exitCode=0 Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.675125 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9k5c6" event={"ID":"4a2e08d4-b882-4805-afac-90f7f2393e5b","Type":"ContainerDied","Data":"accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59"} Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.675173 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9k5c6" event={"ID":"4a2e08d4-b882-4805-afac-90f7f2393e5b","Type":"ContainerDied","Data":"f0e1cb438014823d5e1bedd1dc67d5c05fab65322f78ed1589aa41819285d2df"} Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.675201 5016 scope.go:117] "RemoveContainer" containerID="accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.675278 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9k5c6" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.705579 5016 scope.go:117] "RemoveContainer" containerID="4f25cfccfcdebe327f3f97eda9340c0b21f888fced85dc8bd54c0c84d8ead98e" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.718703 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9k5c6"] Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.730057 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9k5c6"] Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.735969 5016 scope.go:117] "RemoveContainer" containerID="0869ca5939d5db54248ddb1f4879f876f3f55bfdf105cabc4849892b0bff702b" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.792836 5016 scope.go:117] "RemoveContainer" containerID="accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59" Dec 11 11:03:26 crc kubenswrapper[5016]: E1211 11:03:26.794803 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59\": container with ID starting with accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59 not found: ID does not exist" containerID="accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.794841 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59"} err="failed to get container status \"accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59\": rpc error: code = NotFound desc = could not find container \"accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59\": container with ID starting with accaf2375365e169e4444679da9612d202c77ae4d337c63df51d35dd929f2d59 not found: ID does not exist" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.794867 5016 scope.go:117] "RemoveContainer" containerID="4f25cfccfcdebe327f3f97eda9340c0b21f888fced85dc8bd54c0c84d8ead98e" Dec 11 11:03:26 crc kubenswrapper[5016]: E1211 11:03:26.795578 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f25cfccfcdebe327f3f97eda9340c0b21f888fced85dc8bd54c0c84d8ead98e\": container with ID starting with 4f25cfccfcdebe327f3f97eda9340c0b21f888fced85dc8bd54c0c84d8ead98e not found: ID does not exist" containerID="4f25cfccfcdebe327f3f97eda9340c0b21f888fced85dc8bd54c0c84d8ead98e" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.795606 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f25cfccfcdebe327f3f97eda9340c0b21f888fced85dc8bd54c0c84d8ead98e"} err="failed to get container status \"4f25cfccfcdebe327f3f97eda9340c0b21f888fced85dc8bd54c0c84d8ead98e\": rpc error: code = NotFound desc = could not find container \"4f25cfccfcdebe327f3f97eda9340c0b21f888fced85dc8bd54c0c84d8ead98e\": container with ID starting with 4f25cfccfcdebe327f3f97eda9340c0b21f888fced85dc8bd54c0c84d8ead98e not found: ID does not exist" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.795624 5016 scope.go:117] "RemoveContainer" containerID="0869ca5939d5db54248ddb1f4879f876f3f55bfdf105cabc4849892b0bff702b" Dec 11 11:03:26 crc kubenswrapper[5016]: E1211 11:03:26.796129 5016 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0869ca5939d5db54248ddb1f4879f876f3f55bfdf105cabc4849892b0bff702b\": container with ID starting with 0869ca5939d5db54248ddb1f4879f876f3f55bfdf105cabc4849892b0bff702b not found: ID does not exist" containerID="0869ca5939d5db54248ddb1f4879f876f3f55bfdf105cabc4849892b0bff702b" Dec 11 11:03:26 crc kubenswrapper[5016]: I1211 11:03:26.796156 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0869ca5939d5db54248ddb1f4879f876f3f55bfdf105cabc4849892b0bff702b"} err="failed to get container status \"0869ca5939d5db54248ddb1f4879f876f3f55bfdf105cabc4849892b0bff702b\": rpc error: code = NotFound desc = could not find container \"0869ca5939d5db54248ddb1f4879f876f3f55bfdf105cabc4849892b0bff702b\": container with ID starting with 0869ca5939d5db54248ddb1f4879f876f3f55bfdf105cabc4849892b0bff702b not found: ID does not exist" Dec 11 11:03:27 crc kubenswrapper[5016]: I1211 11:03:27.486107 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a2e08d4-b882-4805-afac-90f7f2393e5b" path="/var/lib/kubelet/pods/4a2e08d4-b882-4805-afac-90f7f2393e5b/volumes" Dec 11 11:03:37 crc kubenswrapper[5016]: I1211 11:03:37.475436 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:03:37 crc kubenswrapper[5016]: E1211 11:03:37.476494 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:03:51 crc kubenswrapper[5016]: I1211 11:03:51.475157 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:03:51 crc kubenswrapper[5016]: E1211 11:03:51.477102 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:04:05 crc kubenswrapper[5016]: I1211 11:04:05.475473 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:04:05 crc kubenswrapper[5016]: E1211 11:04:05.476540 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:04:17 crc kubenswrapper[5016]: I1211 11:04:17.474908 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:04:17 crc kubenswrapper[5016]: E1211 11:04:17.475747 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:04:31 crc kubenswrapper[5016]: I1211 11:04:31.474715 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:04:31 crc kubenswrapper[5016]: E1211 11:04:31.476932 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:04:43 crc kubenswrapper[5016]: I1211 11:04:43.480894 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:04:43 crc kubenswrapper[5016]: E1211 11:04:43.481640 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:04:57 crc kubenswrapper[5016]: I1211 11:04:57.475527 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:04:57 crc kubenswrapper[5016]: E1211 11:04:57.476270 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:05:12 crc kubenswrapper[5016]: I1211 11:05:12.475207 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:05:12 crc kubenswrapper[5016]: E1211 11:05:12.477626 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:05:18 crc kubenswrapper[5016]: I1211 11:05:18.843954 5016 generic.go:334] "Generic (PLEG): container finished" podID="10792fe7-d5d5-4918-8658-20331647f302" containerID="9efb1f94d48be5529e468ef34f04cb07b9df1d1697619f25e2626c73a1f15dc8" exitCode=0 Dec 11 11:05:18 crc kubenswrapper[5016]: I1211 11:05:18.844131 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" 
event={"ID":"10792fe7-d5d5-4918-8658-20331647f302","Type":"ContainerDied","Data":"9efb1f94d48be5529e468ef34f04cb07b9df1d1697619f25e2626c73a1f15dc8"} Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.051620 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-9c93-account-create-update-kwv2t"] Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.061382 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-1dd0-account-create-update-26v9f"] Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.071881 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5ecb-account-create-update-t2kgc"] Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.081606 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-nxzgl"] Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.104288 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-49xxd"] Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.115197 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-9c93-account-create-update-kwv2t"] Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.131499 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-5rlq6"] Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.142647 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-1dd0-account-create-update-26v9f"] Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.151652 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-5ecb-account-create-update-t2kgc"] Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.163023 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-49xxd"] Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.174400 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-nxzgl"] Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.186139 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-5rlq6"] Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.486029 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4af08fef-0f95-4448-bd66-b84589609611" path="/var/lib/kubelet/pods/4af08fef-0f95-4448-bd66-b84589609611/volumes" Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.486585 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e6d363c-70c9-4659-aa69-dc91e3f86e07" path="/var/lib/kubelet/pods/5e6d363c-70c9-4659-aa69-dc91e3f86e07/volumes" Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.487155 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0936b89-101b-4e57-81e5-756361104037" path="/var/lib/kubelet/pods/a0936b89-101b-4e57-81e5-756361104037/volumes" Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.487708 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3710c5e-0042-4f48-861d-c793cf81e42f" path="/var/lib/kubelet/pods/c3710c5e-0042-4f48-861d-c793cf81e42f/volumes" Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.488762 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c798030e-6efe-4b81-9d60-96dc199e420a" path="/var/lib/kubelet/pods/c798030e-6efe-4b81-9d60-96dc199e420a/volumes" Dec 11 11:05:19 crc kubenswrapper[5016]: I1211 11:05:19.489343 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="e23b5f15-b825-4869-a7f8-93a8c60a090a" path="/var/lib/kubelet/pods/e23b5f15-b825-4869-a7f8-93a8c60a090a/volumes" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.280096 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.441029 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-bootstrap-combined-ca-bundle\") pod \"10792fe7-d5d5-4918-8658-20331647f302\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.441102 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-ssh-key\") pod \"10792fe7-d5d5-4918-8658-20331647f302\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.441324 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-inventory\") pod \"10792fe7-d5d5-4918-8658-20331647f302\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.442171 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cznj6\" (UniqueName: \"kubernetes.io/projected/10792fe7-d5d5-4918-8658-20331647f302-kube-api-access-cznj6\") pod \"10792fe7-d5d5-4918-8658-20331647f302\" (UID: \"10792fe7-d5d5-4918-8658-20331647f302\") " Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.475160 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10792fe7-d5d5-4918-8658-20331647f302-kube-api-access-cznj6" (OuterVolumeSpecName: "kube-api-access-cznj6") pod "10792fe7-d5d5-4918-8658-20331647f302" (UID: "10792fe7-d5d5-4918-8658-20331647f302"). InnerVolumeSpecName "kube-api-access-cznj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.476132 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "10792fe7-d5d5-4918-8658-20331647f302" (UID: "10792fe7-d5d5-4918-8658-20331647f302"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.515243 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "10792fe7-d5d5-4918-8658-20331647f302" (UID: "10792fe7-d5d5-4918-8658-20331647f302"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.538133 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-inventory" (OuterVolumeSpecName: "inventory") pod "10792fe7-d5d5-4918-8658-20331647f302" (UID: "10792fe7-d5d5-4918-8658-20331647f302"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.547722 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.547757 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cznj6\" (UniqueName: \"kubernetes.io/projected/10792fe7-d5d5-4918-8658-20331647f302-kube-api-access-cznj6\") on node \"crc\" DevicePath \"\"" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.547770 5016 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.547781 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/10792fe7-d5d5-4918-8658-20331647f302-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.864689 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" event={"ID":"10792fe7-d5d5-4918-8658-20331647f302","Type":"ContainerDied","Data":"0b68e1279c8850cd104b4c0010e9a597ccfc752d981bbb1baf3798137c5dd367"} Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.865036 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b68e1279c8850cd104b4c0010e9a597ccfc752d981bbb1baf3798137c5dd367" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.864754 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.954349 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv"] Dec 11 11:05:20 crc kubenswrapper[5016]: E1211 11:05:20.954876 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10792fe7-d5d5-4918-8658-20331647f302" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.954903 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="10792fe7-d5d5-4918-8658-20331647f302" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 11 11:05:20 crc kubenswrapper[5016]: E1211 11:05:20.954934 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a2e08d4-b882-4805-afac-90f7f2393e5b" containerName="extract-content" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.954956 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a2e08d4-b882-4805-afac-90f7f2393e5b" containerName="extract-content" Dec 11 11:05:20 crc kubenswrapper[5016]: E1211 11:05:20.954991 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a2e08d4-b882-4805-afac-90f7f2393e5b" containerName="registry-server" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.954998 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a2e08d4-b882-4805-afac-90f7f2393e5b" containerName="registry-server" Dec 11 11:05:20 crc kubenswrapper[5016]: E1211 11:05:20.955009 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a2e08d4-b882-4805-afac-90f7f2393e5b" containerName="extract-utilities" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.955016 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a2e08d4-b882-4805-afac-90f7f2393e5b" containerName="extract-utilities" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.955249 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="10792fe7-d5d5-4918-8658-20331647f302" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.955269 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a2e08d4-b882-4805-afac-90f7f2393e5b" containerName="registry-server" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.956078 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.959094 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.959409 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.959584 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.959783 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:05:20 crc kubenswrapper[5016]: I1211 11:05:20.976020 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv"] Dec 11 11:05:21 crc kubenswrapper[5016]: I1211 11:05:21.057177 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xc9w2\" (UniqueName: \"kubernetes.io/projected/43a2a77d-f6c4-40ba-8258-ee6bced589f2-kube-api-access-xc9w2\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv\" (UID: \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:05:21 crc kubenswrapper[5016]: I1211 11:05:21.057459 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43a2a77d-f6c4-40ba-8258-ee6bced589f2-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv\" (UID: \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:05:21 crc kubenswrapper[5016]: I1211 11:05:21.057671 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43a2a77d-f6c4-40ba-8258-ee6bced589f2-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv\" (UID: \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:05:21 crc kubenswrapper[5016]: I1211 11:05:21.159345 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xc9w2\" (UniqueName: \"kubernetes.io/projected/43a2a77d-f6c4-40ba-8258-ee6bced589f2-kube-api-access-xc9w2\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv\" (UID: \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:05:21 crc kubenswrapper[5016]: I1211 11:05:21.159399 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43a2a77d-f6c4-40ba-8258-ee6bced589f2-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv\" (UID: \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:05:21 crc kubenswrapper[5016]: I1211 11:05:21.159550 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43a2a77d-f6c4-40ba-8258-ee6bced589f2-ssh-key\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv\" (UID: \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:05:21 crc kubenswrapper[5016]: I1211 11:05:21.166008 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43a2a77d-f6c4-40ba-8258-ee6bced589f2-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv\" (UID: \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:05:21 crc kubenswrapper[5016]: I1211 11:05:21.167802 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43a2a77d-f6c4-40ba-8258-ee6bced589f2-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv\" (UID: \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:05:21 crc kubenswrapper[5016]: I1211 11:05:21.180213 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xc9w2\" (UniqueName: \"kubernetes.io/projected/43a2a77d-f6c4-40ba-8258-ee6bced589f2-kube-api-access-xc9w2\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv\" (UID: \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:05:21 crc kubenswrapper[5016]: I1211 11:05:21.326130 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:05:21 crc kubenswrapper[5016]: I1211 11:05:21.868647 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv"] Dec 11 11:05:21 crc kubenswrapper[5016]: I1211 11:05:21.872532 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 11:05:22 crc kubenswrapper[5016]: I1211 11:05:22.893048 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" event={"ID":"43a2a77d-f6c4-40ba-8258-ee6bced589f2","Type":"ContainerStarted","Data":"48546b1af120853264e5233990872ca5c3449995fc03a734cb9bdfedcc762db2"} Dec 11 11:05:22 crc kubenswrapper[5016]: I1211 11:05:22.893556 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" event={"ID":"43a2a77d-f6c4-40ba-8258-ee6bced589f2","Type":"ContainerStarted","Data":"092a2f30f8e05c9fe7fdd1a32d2d60bea794b0e1453d04f02ba39598fb9618c8"} Dec 11 11:05:22 crc kubenswrapper[5016]: I1211 11:05:22.921518 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" podStartSLOduration=2.436103393 podStartE2EDuration="2.921491439s" podCreationTimestamp="2025-12-11 11:05:20 +0000 UTC" firstStartedPulling="2025-12-11 11:05:21.872169215 +0000 UTC m=+1838.690728794" lastFinishedPulling="2025-12-11 11:05:22.357557261 +0000 UTC m=+1839.176116840" observedRunningTime="2025-12-11 11:05:22.909967468 +0000 UTC m=+1839.728527057" watchObservedRunningTime="2025-12-11 11:05:22.921491439 +0000 UTC m=+1839.740051038" Dec 11 11:05:23 crc kubenswrapper[5016]: I1211 11:05:23.481783 5016 scope.go:117] "RemoveContainer" 
containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:05:23 crc kubenswrapper[5016]: E1211 11:05:23.482188 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:05:37 crc kubenswrapper[5016]: I1211 11:05:37.474614 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:05:37 crc kubenswrapper[5016]: E1211 11:05:37.475405 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:05:50 crc kubenswrapper[5016]: I1211 11:05:50.475761 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:05:50 crc kubenswrapper[5016]: E1211 11:05:50.476575 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:05:51 crc kubenswrapper[5016]: I1211 11:05:51.049586 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-7jsb2"] Dec 11 11:05:51 crc kubenswrapper[5016]: I1211 11:05:51.057484 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-7jsb2"] Dec 11 11:05:51 crc kubenswrapper[5016]: I1211 11:05:51.487928 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b54c94f2-993f-4595-9878-b14557d8bb18" path="/var/lib/kubelet/pods/b54c94f2-993f-4595-9878-b14557d8bb18/volumes" Dec 11 11:05:57 crc kubenswrapper[5016]: I1211 11:05:57.032040 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-sccfv"] Dec 11 11:05:57 crc kubenswrapper[5016]: I1211 11:05:57.043654 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-a34d-account-create-update-kc72d"] Dec 11 11:05:57 crc kubenswrapper[5016]: I1211 11:05:57.054561 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-a34d-account-create-update-kc72d"] Dec 11 11:05:57 crc kubenswrapper[5016]: I1211 11:05:57.065357 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-sccfv"] Dec 11 11:05:57 crc kubenswrapper[5016]: I1211 11:05:57.489336 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03c01ad0-d6ea-441d-9c31-ad70526210fe" path="/var/lib/kubelet/pods/03c01ad0-d6ea-441d-9c31-ad70526210fe/volumes" Dec 11 11:05:57 crc kubenswrapper[5016]: I1211 11:05:57.490114 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="37302689-affc-4cf7-87af-b83bb550b54f" path="/var/lib/kubelet/pods/37302689-affc-4cf7-87af-b83bb550b54f/volumes" Dec 11 11:06:01 crc kubenswrapper[5016]: I1211 11:06:01.047281 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8165-account-create-update-tq5mc"] Dec 11 11:06:01 crc kubenswrapper[5016]: I1211 11:06:01.059656 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-nlrcw"] Dec 11 11:06:01 crc kubenswrapper[5016]: I1211 11:06:01.072254 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-d0d0-account-create-update-4d9n5"] Dec 11 11:06:01 crc kubenswrapper[5016]: I1211 11:06:01.082117 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-nlrcw"] Dec 11 11:06:01 crc kubenswrapper[5016]: I1211 11:06:01.091479 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8165-account-create-update-tq5mc"] Dec 11 11:06:01 crc kubenswrapper[5016]: I1211 11:06:01.099453 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-d0d0-account-create-update-4d9n5"] Dec 11 11:06:01 crc kubenswrapper[5016]: I1211 11:06:01.108640 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-v969h"] Dec 11 11:06:01 crc kubenswrapper[5016]: I1211 11:06:01.117392 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-v969h"] Dec 11 11:06:01 crc kubenswrapper[5016]: I1211 11:06:01.487725 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3617c283-48bc-48fb-8421-b67c914e54ed" path="/var/lib/kubelet/pods/3617c283-48bc-48fb-8421-b67c914e54ed/volumes" Dec 11 11:06:01 crc kubenswrapper[5016]: I1211 11:06:01.488637 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5450c752-02be-4423-9f58-0a840439f5f4" path="/var/lib/kubelet/pods/5450c752-02be-4423-9f58-0a840439f5f4/volumes" Dec 11 11:06:01 crc kubenswrapper[5016]: I1211 11:06:01.489203 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6465136-7a18-47c1-bab2-b1a407a75ea2" path="/var/lib/kubelet/pods/d6465136-7a18-47c1-bab2-b1a407a75ea2/volumes" Dec 11 11:06:01 crc kubenswrapper[5016]: I1211 11:06:01.489756 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8d7f1ca-06ea-4138-bb6c-56fa633775b9" path="/var/lib/kubelet/pods/f8d7f1ca-06ea-4138-bb6c-56fa633775b9/volumes" Dec 11 11:06:03 crc kubenswrapper[5016]: I1211 11:06:03.486440 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:06:03 crc kubenswrapper[5016]: E1211 11:06:03.487081 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:06:07 crc kubenswrapper[5016]: I1211 11:06:07.034176 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-ng4n6"] Dec 11 11:06:07 crc kubenswrapper[5016]: I1211 11:06:07.049697 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-ng4n6"] Dec 11 11:06:07 crc kubenswrapper[5016]: I1211 11:06:07.486724 5016 kubelet_volumes.go:163] "Cleaned 
Dec 11 11:06:09 crc kubenswrapper[5016]: I1211 11:06:09.943314 5016 scope.go:117] "RemoveContainer" containerID="426d29ba7043089bb47e27086bb0ae362c8aff3274315409d4980ee4491cc493"
Dec 11 11:06:09 crc kubenswrapper[5016]: I1211 11:06:09.971790 5016 scope.go:117] "RemoveContainer" containerID="9f9918fe3ab9d959f65a3b9db625c833dbf785de2f412b5e077f278554022107"
Dec 11 11:06:10 crc kubenswrapper[5016]: I1211 11:06:10.039480 5016 scope.go:117] "RemoveContainer" containerID="18b2b1300a89f2085d38838c70a3361cf9342d758fda36b5bfcc42aad0c37219"
Dec 11 11:06:10 crc kubenswrapper[5016]: I1211 11:06:10.078893 5016 scope.go:117] "RemoveContainer" containerID="fa6774202c1e09cb4d25a552c564bacfd6372911d02c77d0ed7c3078238d169b"
Dec 11 11:06:10 crc kubenswrapper[5016]: I1211 11:06:10.133814 5016 scope.go:117] "RemoveContainer" containerID="bf47c557f33afb7ef18f3e16124eedaa525c3b7b1090d21b8f9c8d7b2e98a427"
Dec 11 11:06:10 crc kubenswrapper[5016]: I1211 11:06:10.173260 5016 scope.go:117] "RemoveContainer" containerID="16c6e2c392d039de765be94a80fa7860a850e61ecb31c47031e683a998e7787b"
Dec 11 11:06:10 crc kubenswrapper[5016]: I1211 11:06:10.220796 5016 scope.go:117] "RemoveContainer" containerID="ad32aec390c81c640a9f6fdcedcdb8c610dd6fbc2e7f2c1df1df14262d574d39"
Dec 11 11:06:10 crc kubenswrapper[5016]: I1211 11:06:10.257308 5016 scope.go:117] "RemoveContainer" containerID="5636b2ff84eaf306130fc233af14fa56371d34df685f64f43cec59011b843098"
Dec 11 11:06:10 crc kubenswrapper[5016]: I1211 11:06:10.277639 5016 scope.go:117] "RemoveContainer" containerID="9e1d76e1b7fd2318bec5c37411dc7c109345d076eab39700173eba995a3e2fa1"
Dec 11 11:06:10 crc kubenswrapper[5016]: I1211 11:06:10.298265 5016 scope.go:117] "RemoveContainer" containerID="0a7cba47a64ef55b5c9266c0d7a7d17a143ec05d43653f9358774cfbe7ecf486"
Dec 11 11:06:10 crc kubenswrapper[5016]: I1211 11:06:10.318007 5016 scope.go:117] "RemoveContainer" containerID="b5fab13e16c9ffe62925482318c215853371e60c6caf2f2784ca737de1ab7623"
Dec 11 11:06:10 crc kubenswrapper[5016]: I1211 11:06:10.337740 5016 scope.go:117] "RemoveContainer" containerID="63ae5ae9a4fa7469c6fd35fc38346f5fbf4809490a5e36a0a9b08c9e7ace6c77"
Dec 11 11:06:10 crc kubenswrapper[5016]: I1211 11:06:10.359798 5016 scope.go:117] "RemoveContainer" containerID="7bb507ca06f22c64bd69b2e6122981cbc6784ae00b2e0a29bacabc545f15c2f9"
Dec 11 11:06:10 crc kubenswrapper[5016]: I1211 11:06:10.379695 5016 scope.go:117] "RemoveContainer" containerID="7a1667836ae9c494ac94ebe9a9c8a1484d54dc711893f846cb233edf5aea2e59"
Dec 11 11:06:15 crc kubenswrapper[5016]: I1211 11:06:15.475357 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756"
Dec 11 11:06:15 crc kubenswrapper[5016]: E1211 11:06:15.476167 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:06:28 crc kubenswrapper[5016]: I1211 11:06:28.474471 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756"
Dec 11 11:06:28 crc kubenswrapper[5016]: E1211 11:06:28.475269 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:06:41 crc kubenswrapper[5016]: I1211 11:06:41.040379 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-jnv4g"]
Dec 11 11:06:41 crc kubenswrapper[5016]: I1211 11:06:41.049023 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-jnv4g"]
Dec 11 11:06:41 crc kubenswrapper[5016]: I1211 11:06:41.486776 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf05bc17-f548-45a7-a1c1-eb32b12957d2" path="/var/lib/kubelet/pods/cf05bc17-f548-45a7-a1c1-eb32b12957d2/volumes"
Dec 11 11:06:42 crc kubenswrapper[5016]: I1211 11:06:42.475428 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756"
Dec 11 11:06:42 crc kubenswrapper[5016]: E1211 11:06:42.476147 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:06:49 crc kubenswrapper[5016]: I1211 11:06:49.034676 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-44rj9"]
Dec 11 11:06:49 crc kubenswrapper[5016]: I1211 11:06:49.047781 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-44rj9"]
Dec 11 11:06:49 crc kubenswrapper[5016]: I1211 11:06:49.487128 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae211270-86fb-4d5e-a028-49d60d9a6685" path="/var/lib/kubelet/pods/ae211270-86fb-4d5e-a028-49d60d9a6685/volumes"
Dec 11 11:06:54 crc kubenswrapper[5016]: I1211 11:06:54.047775 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-vmt79"]
Dec 11 11:06:54 crc kubenswrapper[5016]: I1211 11:06:54.058265 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-ckqjx"]
Dec 11 11:06:54 crc kubenswrapper[5016]: I1211 11:06:54.076066 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-vmt79"]
Dec 11 11:06:54 crc kubenswrapper[5016]: I1211 11:06:54.083900 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-ckqjx"]
Dec 11 11:06:55 crc kubenswrapper[5016]: I1211 11:06:55.498749 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1091d9e0-69c8-499d-bf06-7aacc52d8ec6" path="/var/lib/kubelet/pods/1091d9e0-69c8-499d-bf06-7aacc52d8ec6/volumes"
Dec 11 11:06:55 crc kubenswrapper[5016]: I1211 11:06:55.500876 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4b58628-6bc5-4fab-b806-9c1f615c006c" path="/var/lib/kubelet/pods/b4b58628-6bc5-4fab-b806-9c1f615c006c/volumes"
Dec 11 11:06:56 crc kubenswrapper[5016]: I1211 11:06:56.475048 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756"
containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:06:56 crc kubenswrapper[5016]: E1211 11:06:56.475349 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:07:08 crc kubenswrapper[5016]: I1211 11:07:08.475822 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:07:08 crc kubenswrapper[5016]: E1211 11:07:08.476986 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:07:10 crc kubenswrapper[5016]: I1211 11:07:10.689408 5016 scope.go:117] "RemoveContainer" containerID="9da786d94b83b298510438b22c45c1c64b2533494fe7d6bde5b66f709c2d31bc" Dec 11 11:07:10 crc kubenswrapper[5016]: I1211 11:07:10.754285 5016 scope.go:117] "RemoveContainer" containerID="cb6cc762b8d5740c91ba7612b7c34ba2f706e69d6383afe3c9fdfc6e22c5abc8" Dec 11 11:07:10 crc kubenswrapper[5016]: I1211 11:07:10.809071 5016 scope.go:117] "RemoveContainer" containerID="5b79cda94f5111ba9acbe94ca34b9803ca098509090c7de1d134798fe1282cb7" Dec 11 11:07:10 crc kubenswrapper[5016]: I1211 11:07:10.864986 5016 scope.go:117] "RemoveContainer" containerID="ccfcb3457a53b85eb1d0d770f71d50169d52f4bc04e8c43a3ff5c9bb83231090" Dec 11 11:07:11 crc kubenswrapper[5016]: I1211 11:07:11.041520 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-dgkjf"] Dec 11 11:07:11 crc kubenswrapper[5016]: I1211 11:07:11.050778 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-dgkjf"] Dec 11 11:07:11 crc kubenswrapper[5016]: I1211 11:07:11.488208 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d31ff49c-2515-4b93-b3b8-e776e3190ab7" path="/var/lib/kubelet/pods/d31ff49c-2515-4b93-b3b8-e776e3190ab7/volumes" Dec 11 11:07:23 crc kubenswrapper[5016]: I1211 11:07:23.481010 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:07:23 crc kubenswrapper[5016]: E1211 11:07:23.481779 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:07:34 crc kubenswrapper[5016]: I1211 11:07:34.476314 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:07:34 crc kubenswrapper[5016]: E1211 11:07:34.478420 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:07:46 crc kubenswrapper[5016]: I1211 11:07:46.287610 5016 generic.go:334] "Generic (PLEG): container finished" podID="43a2a77d-f6c4-40ba-8258-ee6bced589f2" containerID="48546b1af120853264e5233990872ca5c3449995fc03a734cb9bdfedcc762db2" exitCode=0 Dec 11 11:07:46 crc kubenswrapper[5016]: I1211 11:07:46.287702 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" event={"ID":"43a2a77d-f6c4-40ba-8258-ee6bced589f2","Type":"ContainerDied","Data":"48546b1af120853264e5233990872ca5c3449995fc03a734cb9bdfedcc762db2"} Dec 11 11:07:46 crc kubenswrapper[5016]: I1211 11:07:46.475606 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:07:46 crc kubenswrapper[5016]: E1211 11:07:46.476084 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:07:47 crc kubenswrapper[5016]: I1211 11:07:47.775073 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:07:47 crc kubenswrapper[5016]: I1211 11:07:47.959854 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xc9w2\" (UniqueName: \"kubernetes.io/projected/43a2a77d-f6c4-40ba-8258-ee6bced589f2-kube-api-access-xc9w2\") pod \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\" (UID: \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\") " Dec 11 11:07:47 crc kubenswrapper[5016]: I1211 11:07:47.959974 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43a2a77d-f6c4-40ba-8258-ee6bced589f2-inventory\") pod \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\" (UID: \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\") " Dec 11 11:07:47 crc kubenswrapper[5016]: I1211 11:07:47.960172 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43a2a77d-f6c4-40ba-8258-ee6bced589f2-ssh-key\") pod \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\" (UID: \"43a2a77d-f6c4-40ba-8258-ee6bced589f2\") " Dec 11 11:07:47 crc kubenswrapper[5016]: I1211 11:07:47.968713 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43a2a77d-f6c4-40ba-8258-ee6bced589f2-kube-api-access-xc9w2" (OuterVolumeSpecName: "kube-api-access-xc9w2") pod "43a2a77d-f6c4-40ba-8258-ee6bced589f2" (UID: "43a2a77d-f6c4-40ba-8258-ee6bced589f2"). InnerVolumeSpecName "kube-api-access-xc9w2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:07:47 crc kubenswrapper[5016]: I1211 11:07:47.990851 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43a2a77d-f6c4-40ba-8258-ee6bced589f2-inventory" (OuterVolumeSpecName: "inventory") pod "43a2a77d-f6c4-40ba-8258-ee6bced589f2" (UID: "43a2a77d-f6c4-40ba-8258-ee6bced589f2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:07:47 crc kubenswrapper[5016]: I1211 11:07:47.994376 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43a2a77d-f6c4-40ba-8258-ee6bced589f2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "43a2a77d-f6c4-40ba-8258-ee6bced589f2" (UID: "43a2a77d-f6c4-40ba-8258-ee6bced589f2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.062444 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xc9w2\" (UniqueName: \"kubernetes.io/projected/43a2a77d-f6c4-40ba-8258-ee6bced589f2-kube-api-access-xc9w2\") on node \"crc\" DevicePath \"\"" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.062484 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43a2a77d-f6c4-40ba-8258-ee6bced589f2-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.062497 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43a2a77d-f6c4-40ba-8258-ee6bced589f2-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.310580 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" event={"ID":"43a2a77d-f6c4-40ba-8258-ee6bced589f2","Type":"ContainerDied","Data":"092a2f30f8e05c9fe7fdd1a32d2d60bea794b0e1453d04f02ba39598fb9618c8"} Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.310629 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="092a2f30f8e05c9fe7fdd1a32d2d60bea794b0e1453d04f02ba39598fb9618c8" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.310645 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.408148 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh"] Dec 11 11:07:48 crc kubenswrapper[5016]: E1211 11:07:48.408617 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43a2a77d-f6c4-40ba-8258-ee6bced589f2" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.408642 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="43a2a77d-f6c4-40ba-8258-ee6bced589f2" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.408857 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="43a2a77d-f6c4-40ba-8258-ee6bced589f2" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.409563 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.413675 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.414049 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.418113 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.420422 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh"] Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.424157 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.572535 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh\" (UID: \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.572613 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh\" (UID: \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.572650 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxm8c\" (UniqueName: \"kubernetes.io/projected/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-kube-api-access-qxm8c\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh\" (UID: \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.675541 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh\" (UID: \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.676531 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh\" (UID: \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.676565 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxm8c\" (UniqueName: \"kubernetes.io/projected/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-kube-api-access-qxm8c\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh\" (UID: \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.680170 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh\" (UID: \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.680875 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh\" (UID: \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.694654 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxm8c\" (UniqueName: \"kubernetes.io/projected/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-kube-api-access-qxm8c\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh\" (UID: \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:07:48 crc kubenswrapper[5016]: I1211 11:07:48.730449 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:07:49 crc kubenswrapper[5016]: I1211 11:07:49.285553 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh"] Dec 11 11:07:49 crc kubenswrapper[5016]: I1211 11:07:49.321102 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" event={"ID":"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f","Type":"ContainerStarted","Data":"698908b94ee8234adb6e958593a8b36d0fa6c96963027a965602456e834faeff"} Dec 11 11:07:51 crc kubenswrapper[5016]: I1211 11:07:51.039608 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-n4qm6"] Dec 11 11:07:51 crc kubenswrapper[5016]: I1211 11:07:51.048212 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-n4qm6"] Dec 11 11:07:51 crc kubenswrapper[5016]: I1211 11:07:51.723352 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8a2de44-054b-4a31-8d4e-d88d349d59f5" path="/var/lib/kubelet/pods/c8a2de44-054b-4a31-8d4e-d88d349d59f5/volumes" Dec 11 11:07:52 crc kubenswrapper[5016]: I1211 11:07:52.043601 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-n667p"] Dec 11 11:07:52 crc kubenswrapper[5016]: I1211 11:07:52.056820 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-n667p"] Dec 11 11:07:52 crc kubenswrapper[5016]: I1211 11:07:52.067257 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-edf2-account-create-update-zn58v"] Dec 11 11:07:52 crc kubenswrapper[5016]: I1211 11:07:52.077612 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-edf2-account-create-update-zn58v"] Dec 11 11:07:52 crc 
Dec 11 11:07:52 crc kubenswrapper[5016]: I1211 11:07:52.756132 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" podStartSLOduration=2.204406323 podStartE2EDuration="4.75610282s" podCreationTimestamp="2025-12-11 11:07:48 +0000 UTC" firstStartedPulling="2025-12-11 11:07:49.293863587 +0000 UTC m=+1986.112423166" lastFinishedPulling="2025-12-11 11:07:51.845560084 +0000 UTC m=+1988.664119663" observedRunningTime="2025-12-11 11:07:52.755431734 +0000 UTC m=+1989.573991313" watchObservedRunningTime="2025-12-11 11:07:52.75610282 +0000 UTC m=+1989.574662399"
Dec 11 11:07:53 crc kubenswrapper[5016]: I1211 11:07:53.032559 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-r4v66"]
Dec 11 11:07:53 crc kubenswrapper[5016]: I1211 11:07:53.044677 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-r4v66"]
Dec 11 11:07:53 crc kubenswrapper[5016]: I1211 11:07:53.054986 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-57f7-account-create-update-8rkld"]
Dec 11 11:07:53 crc kubenswrapper[5016]: I1211 11:07:53.063501 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-57f7-account-create-update-8rkld"]
Dec 11 11:07:53 crc kubenswrapper[5016]: I1211 11:07:53.072869 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-2d6c-account-create-update-6j78f"]
Dec 11 11:07:53 crc kubenswrapper[5016]: I1211 11:07:53.081077 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-2d6c-account-create-update-6j78f"]
Dec 11 11:07:53 crc kubenswrapper[5016]: I1211 11:07:53.486683 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c350656-c130-4eef-8c2f-be3f74dc25f4" path="/var/lib/kubelet/pods/2c350656-c130-4eef-8c2f-be3f74dc25f4/volumes"
Dec 11 11:07:53 crc kubenswrapper[5016]: I1211 11:07:53.487343 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41bed616-8e34-49a0-9ade-3b17f7988491" path="/var/lib/kubelet/pods/41bed616-8e34-49a0-9ade-3b17f7988491/volumes"
Dec 11 11:07:53 crc kubenswrapper[5016]: I1211 11:07:53.487915 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a8b2169-5dd3-4e93-9b65-d665c3cf5e75" path="/var/lib/kubelet/pods/4a8b2169-5dd3-4e93-9b65-d665c3cf5e75/volumes"
Dec 11 11:07:53 crc kubenswrapper[5016]: I1211 11:07:53.488730 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddbcb259-d1f7-4de6-b255-114890395ec8" path="/var/lib/kubelet/pods/ddbcb259-d1f7-4de6-b255-114890395ec8/volumes"
Dec 11 11:07:53 crc kubenswrapper[5016]: I1211 11:07:53.489853 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd14602e-defa-4080-a30b-fa7736df6746" path="/var/lib/kubelet/pods/fd14602e-defa-4080-a30b-fa7736df6746/volumes"
Dec 11 11:07:59 crc kubenswrapper[5016]: I1211 11:07:59.475620 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756"
Dec 11 11:07:59 crc kubenswrapper[5016]: E1211 11:07:59.476408 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:08:11 crc kubenswrapper[5016]: I1211 11:08:11.000186 5016 scope.go:117] "RemoveContainer" containerID="aee1e178f02878f7ba0e4b0d452285e680f104aaf81231b2b438c0e18c26df52" Dec 11 11:08:11 crc kubenswrapper[5016]: I1211 11:08:11.039753 5016 scope.go:117] "RemoveContainer" containerID="12cce264a30a2a12ec5a15c1064e8eec542a78f676eef42f65b94a06a644a7d9" Dec 11 11:08:11 crc kubenswrapper[5016]: I1211 11:08:11.122814 5016 scope.go:117] "RemoveContainer" containerID="a099f2b6d200efc80a8dc3d6bf02753567124070ecec1652ffc2bf85ac4df96e" Dec 11 11:08:11 crc kubenswrapper[5016]: I1211 11:08:11.144899 5016 scope.go:117] "RemoveContainer" containerID="3ecaa435ad620f9fac63414d2ce71452cc5013e77b2fbaab44abfaafabcf450c" Dec 11 11:08:11 crc kubenswrapper[5016]: I1211 11:08:11.204812 5016 scope.go:117] "RemoveContainer" containerID="e6bf1a49839e29ac76eba4b99538702468547e75fa013f904a5a8f51d778a93e" Dec 11 11:08:11 crc kubenswrapper[5016]: I1211 11:08:11.228012 5016 scope.go:117] "RemoveContainer" containerID="71a5476311bf6e02febabf4bca987e9680f8e1e367c5d796ed3419f5a92a2201" Dec 11 11:08:11 crc kubenswrapper[5016]: I1211 11:08:11.272103 5016 scope.go:117] "RemoveContainer" containerID="9a475d7ea16ee81335e7d10d73cba278d7eb3087310d516be0609cc4fa9a5b0f" Dec 11 11:08:11 crc kubenswrapper[5016]: I1211 11:08:11.321222 5016 scope.go:117] "RemoveContainer" containerID="91d9fe660aef116f37dfb9b8f665905a0223e5e78cec0b7dde90695de46db9c3" Dec 11 11:08:11 crc kubenswrapper[5016]: I1211 11:08:11.346902 5016 scope.go:117] "RemoveContainer" containerID="8d13c5d7ee7ef20caeb32d1e46126494f578e054a45b38ae0db4d48c7026ebc8" Dec 11 11:08:11 crc kubenswrapper[5016]: I1211 11:08:11.374151 5016 scope.go:117] "RemoveContainer" containerID="c18189ddbde556c4681560abf1832321cda0ce890ecfd6f8bf634c518c857395" Dec 11 11:08:14 crc kubenswrapper[5016]: I1211 11:08:14.474068 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:08:14 crc kubenswrapper[5016]: I1211 11:08:14.937563 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"745e2ce5229d52e8fc8364c6aec4dc08214336fac2ba63dc453c716f42c02e8d"} Dec 11 11:08:23 crc kubenswrapper[5016]: I1211 11:08:23.060695 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-w6xjr"] Dec 11 11:08:23 crc kubenswrapper[5016]: I1211 11:08:23.069857 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-w6xjr"] Dec 11 11:08:23 crc kubenswrapper[5016]: I1211 11:08:23.486076 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e3e1081-5b46-471a-8978-804c54a32bc9" path="/var/lib/kubelet/pods/7e3e1081-5b46-471a-8978-804c54a32bc9/volumes" Dec 11 11:08:42 crc kubenswrapper[5016]: I1211 11:08:42.060146 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-ftrtx"] Dec 11 11:08:42 crc kubenswrapper[5016]: I1211 11:08:42.068514 5016 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/nova-cell0-cell-mapping-ftrtx"] Dec 11 11:08:43 crc kubenswrapper[5016]: I1211 11:08:43.487383 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b475a2c8-d3b9-4b61-a58a-a806599c689a" path="/var/lib/kubelet/pods/b475a2c8-d3b9-4b61-a58a-a806599c689a/volumes" Dec 11 11:08:52 crc kubenswrapper[5016]: I1211 11:08:52.040731 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-d5pmm"] Dec 11 11:08:52 crc kubenswrapper[5016]: I1211 11:08:52.064185 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-d5pmm"] Dec 11 11:08:53 crc kubenswrapper[5016]: I1211 11:08:53.488147 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80b378da-6397-4b63-8eb4-3a2a465e6425" path="/var/lib/kubelet/pods/80b378da-6397-4b63-8eb4-3a2a465e6425/volumes" Dec 11 11:09:10 crc kubenswrapper[5016]: I1211 11:09:10.524260 5016 generic.go:334] "Generic (PLEG): container finished" podID="6b68b5b9-fe7e-4340-8541-71c6f8b80f3f" containerID="3455cef954dd516615cd8695b126b5cc61fb50a642822774bfae1f127914cb7c" exitCode=0 Dec 11 11:09:10 crc kubenswrapper[5016]: I1211 11:09:10.524386 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" event={"ID":"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f","Type":"ContainerDied","Data":"3455cef954dd516615cd8695b126b5cc61fb50a642822774bfae1f127914cb7c"} Dec 11 11:09:11 crc kubenswrapper[5016]: I1211 11:09:11.556583 5016 scope.go:117] "RemoveContainer" containerID="46bfd12debb885d0c6820a06af23ca97f3b687bc9a3be01df22bec6887d980ae" Dec 11 11:09:11 crc kubenswrapper[5016]: I1211 11:09:11.632075 5016 scope.go:117] "RemoveContainer" containerID="b38178e5c54cc99963dad2af4beb7cc0015ee856d538a7f49d2b020adb5b4709" Dec 11 11:09:11 crc kubenswrapper[5016]: I1211 11:09:11.691093 5016 scope.go:117] "RemoveContainer" containerID="a7d200cf5638600ce4b49d4c48c3f919d152702a0254d8dee213992fe20f91b9" Dec 11 11:09:11 crc kubenswrapper[5016]: I1211 11:09:11.980153 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.106124 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxm8c\" (UniqueName: \"kubernetes.io/projected/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-kube-api-access-qxm8c\") pod \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\" (UID: \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\") " Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.106281 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-ssh-key\") pod \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\" (UID: \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\") " Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.106419 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-inventory\") pod \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\" (UID: \"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f\") " Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.113034 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-kube-api-access-qxm8c" (OuterVolumeSpecName: "kube-api-access-qxm8c") pod "6b68b5b9-fe7e-4340-8541-71c6f8b80f3f" (UID: "6b68b5b9-fe7e-4340-8541-71c6f8b80f3f"). InnerVolumeSpecName "kube-api-access-qxm8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.137692 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-inventory" (OuterVolumeSpecName: "inventory") pod "6b68b5b9-fe7e-4340-8541-71c6f8b80f3f" (UID: "6b68b5b9-fe7e-4340-8541-71c6f8b80f3f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.138402 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6b68b5b9-fe7e-4340-8541-71c6f8b80f3f" (UID: "6b68b5b9-fe7e-4340-8541-71c6f8b80f3f"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.210465 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.210554 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxm8c\" (UniqueName: \"kubernetes.io/projected/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-kube-api-access-qxm8c\") on node \"crc\" DevicePath \"\"" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.210578 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b68b5b9-fe7e-4340-8541-71c6f8b80f3f-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.547293 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" event={"ID":"6b68b5b9-fe7e-4340-8541-71c6f8b80f3f","Type":"ContainerDied","Data":"698908b94ee8234adb6e958593a8b36d0fa6c96963027a965602456e834faeff"} Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.547348 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="698908b94ee8234adb6e958593a8b36d0fa6c96963027a965602456e834faeff" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.547438 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.655871 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql"] Dec 11 11:09:12 crc kubenswrapper[5016]: E1211 11:09:12.657235 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b68b5b9-fe7e-4340-8541-71c6f8b80f3f" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.657262 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b68b5b9-fe7e-4340-8541-71c6f8b80f3f" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.657612 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b68b5b9-fe7e-4340-8541-71c6f8b80f3f" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.658838 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.663042 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.663237 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.663362 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.663355 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.669467 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql"] Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.827099 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53387d31-b49f-4100-9772-a4f7d6898471-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql\" (UID: \"53387d31-b49f-4100-9772-a4f7d6898471\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.827212 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53387d31-b49f-4100-9772-a4f7d6898471-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql\" (UID: \"53387d31-b49f-4100-9772-a4f7d6898471\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.827268 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7psj\" (UniqueName: \"kubernetes.io/projected/53387d31-b49f-4100-9772-a4f7d6898471-kube-api-access-l7psj\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql\" (UID: \"53387d31-b49f-4100-9772-a4f7d6898471\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.929651 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53387d31-b49f-4100-9772-a4f7d6898471-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql\" (UID: \"53387d31-b49f-4100-9772-a4f7d6898471\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.929762 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53387d31-b49f-4100-9772-a4f7d6898471-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql\" (UID: \"53387d31-b49f-4100-9772-a4f7d6898471\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.929816 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7psj\" (UniqueName: \"kubernetes.io/projected/53387d31-b49f-4100-9772-a4f7d6898471-kube-api-access-l7psj\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql\" (UID: \"53387d31-b49f-4100-9772-a4f7d6898471\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.936420 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53387d31-b49f-4100-9772-a4f7d6898471-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql\" (UID: \"53387d31-b49f-4100-9772-a4f7d6898471\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.938658 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53387d31-b49f-4100-9772-a4f7d6898471-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql\" (UID: \"53387d31-b49f-4100-9772-a4f7d6898471\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.948717 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7psj\" (UniqueName: \"kubernetes.io/projected/53387d31-b49f-4100-9772-a4f7d6898471-kube-api-access-l7psj\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql\" (UID: \"53387d31-b49f-4100-9772-a4f7d6898471\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:12 crc kubenswrapper[5016]: I1211 11:09:12.977923 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:13 crc kubenswrapper[5016]: I1211 11:09:13.525184 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql"] Dec 11 11:09:13 crc kubenswrapper[5016]: I1211 11:09:13.562206 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" event={"ID":"53387d31-b49f-4100-9772-a4f7d6898471","Type":"ContainerStarted","Data":"f2606e5a024c3f8aa940493d5448ad5a960424239ac13ea541e8313ab0c9ada8"} Dec 11 11:09:15 crc kubenswrapper[5016]: I1211 11:09:15.583568 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" event={"ID":"53387d31-b49f-4100-9772-a4f7d6898471","Type":"ContainerStarted","Data":"af06f0e6b3268ec0e7c466f5a58df791fde7774c3c21f81df59397a5c65c7302"} Dec 11 11:09:15 crc kubenswrapper[5016]: I1211 11:09:15.599834 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" podStartSLOduration=2.822809696 podStartE2EDuration="3.599814522s" podCreationTimestamp="2025-12-11 11:09:12 +0000 UTC" firstStartedPulling="2025-12-11 11:09:13.527193717 +0000 UTC m=+2070.345753296" lastFinishedPulling="2025-12-11 11:09:14.304198543 +0000 UTC m=+2071.122758122" observedRunningTime="2025-12-11 11:09:15.598511061 +0000 UTC m=+2072.417070640" watchObservedRunningTime="2025-12-11 11:09:15.599814522 +0000 UTC m=+2072.418374091" Dec 11 11:09:19 crc kubenswrapper[5016]: I1211 11:09:19.629089 5016 generic.go:334] "Generic (PLEG): container finished" podID="53387d31-b49f-4100-9772-a4f7d6898471" containerID="af06f0e6b3268ec0e7c466f5a58df791fde7774c3c21f81df59397a5c65c7302" exitCode=0 Dec 11 11:09:19 crc kubenswrapper[5016]: I1211 
11:09:19.629176 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" event={"ID":"53387d31-b49f-4100-9772-a4f7d6898471","Type":"ContainerDied","Data":"af06f0e6b3268ec0e7c466f5a58df791fde7774c3c21f81df59397a5c65c7302"} Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.107989 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.210069 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53387d31-b49f-4100-9772-a4f7d6898471-inventory\") pod \"53387d31-b49f-4100-9772-a4f7d6898471\" (UID: \"53387d31-b49f-4100-9772-a4f7d6898471\") " Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.210287 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53387d31-b49f-4100-9772-a4f7d6898471-ssh-key\") pod \"53387d31-b49f-4100-9772-a4f7d6898471\" (UID: \"53387d31-b49f-4100-9772-a4f7d6898471\") " Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.211401 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7psj\" (UniqueName: \"kubernetes.io/projected/53387d31-b49f-4100-9772-a4f7d6898471-kube-api-access-l7psj\") pod \"53387d31-b49f-4100-9772-a4f7d6898471\" (UID: \"53387d31-b49f-4100-9772-a4f7d6898471\") " Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.243135 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53387d31-b49f-4100-9772-a4f7d6898471-kube-api-access-l7psj" (OuterVolumeSpecName: "kube-api-access-l7psj") pod "53387d31-b49f-4100-9772-a4f7d6898471" (UID: "53387d31-b49f-4100-9772-a4f7d6898471"). InnerVolumeSpecName "kube-api-access-l7psj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.261053 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53387d31-b49f-4100-9772-a4f7d6898471-inventory" (OuterVolumeSpecName: "inventory") pod "53387d31-b49f-4100-9772-a4f7d6898471" (UID: "53387d31-b49f-4100-9772-a4f7d6898471"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.261647 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53387d31-b49f-4100-9772-a4f7d6898471-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "53387d31-b49f-4100-9772-a4f7d6898471" (UID: "53387d31-b49f-4100-9772-a4f7d6898471"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.314173 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53387d31-b49f-4100-9772-a4f7d6898471-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.314452 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7psj\" (UniqueName: \"kubernetes.io/projected/53387d31-b49f-4100-9772-a4f7d6898471-kube-api-access-l7psj\") on node \"crc\" DevicePath \"\"" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.314580 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53387d31-b49f-4100-9772-a4f7d6898471-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.651247 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" event={"ID":"53387d31-b49f-4100-9772-a4f7d6898471","Type":"ContainerDied","Data":"f2606e5a024c3f8aa940493d5448ad5a960424239ac13ea541e8313ab0c9ada8"} Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.651552 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2606e5a024c3f8aa940493d5448ad5a960424239ac13ea541e8313ab0c9ada8" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.651634 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.744816 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc"] Dec 11 11:09:21 crc kubenswrapper[5016]: E1211 11:09:21.745398 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53387d31-b49f-4100-9772-a4f7d6898471" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.745428 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="53387d31-b49f-4100-9772-a4f7d6898471" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.745702 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="53387d31-b49f-4100-9772-a4f7d6898471" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.746660 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.754403 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.754762 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.755097 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.755423 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.763024 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc"] Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.833091 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8n9zw\" (UniqueName: \"kubernetes.io/projected/b4760482-fee8-4399-bae9-a30831f41536-kube-api-access-8n9zw\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8rmhc\" (UID: \"b4760482-fee8-4399-bae9-a30831f41536\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.833219 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4760482-fee8-4399-bae9-a30831f41536-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8rmhc\" (UID: \"b4760482-fee8-4399-bae9-a30831f41536\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.833704 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4760482-fee8-4399-bae9-a30831f41536-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8rmhc\" (UID: \"b4760482-fee8-4399-bae9-a30831f41536\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.935290 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8n9zw\" (UniqueName: \"kubernetes.io/projected/b4760482-fee8-4399-bae9-a30831f41536-kube-api-access-8n9zw\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8rmhc\" (UID: \"b4760482-fee8-4399-bae9-a30831f41536\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.935387 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4760482-fee8-4399-bae9-a30831f41536-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8rmhc\" (UID: \"b4760482-fee8-4399-bae9-a30831f41536\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.935457 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4760482-fee8-4399-bae9-a30831f41536-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8rmhc\" (UID: 
\"b4760482-fee8-4399-bae9-a30831f41536\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.941031 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4760482-fee8-4399-bae9-a30831f41536-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8rmhc\" (UID: \"b4760482-fee8-4399-bae9-a30831f41536\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.952663 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8n9zw\" (UniqueName: \"kubernetes.io/projected/b4760482-fee8-4399-bae9-a30831f41536-kube-api-access-8n9zw\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8rmhc\" (UID: \"b4760482-fee8-4399-bae9-a30831f41536\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:09:21 crc kubenswrapper[5016]: I1211 11:09:21.953089 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4760482-fee8-4399-bae9-a30831f41536-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8rmhc\" (UID: \"b4760482-fee8-4399-bae9-a30831f41536\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:09:22 crc kubenswrapper[5016]: I1211 11:09:22.072391 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:09:22 crc kubenswrapper[5016]: I1211 11:09:22.661160 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc"] Dec 11 11:09:23 crc kubenswrapper[5016]: I1211 11:09:23.672777 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" event={"ID":"b4760482-fee8-4399-bae9-a30831f41536","Type":"ContainerStarted","Data":"5061fc159b6090717c2faec2219740763c6199b5740ca70f5d270252db48923a"} Dec 11 11:09:23 crc kubenswrapper[5016]: I1211 11:09:23.673574 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" event={"ID":"b4760482-fee8-4399-bae9-a30831f41536","Type":"ContainerStarted","Data":"1333b56f71b625511ca791e2c47d46b5f69b56528dfbb58a9511bfcb9793acfc"} Dec 11 11:09:23 crc kubenswrapper[5016]: I1211 11:09:23.699182 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" podStartSLOduration=2.259957171 podStartE2EDuration="2.699153127s" podCreationTimestamp="2025-12-11 11:09:21 +0000 UTC" firstStartedPulling="2025-12-11 11:09:22.678635181 +0000 UTC m=+2079.497194760" lastFinishedPulling="2025-12-11 11:09:23.117831137 +0000 UTC m=+2079.936390716" observedRunningTime="2025-12-11 11:09:23.690058785 +0000 UTC m=+2080.508618384" watchObservedRunningTime="2025-12-11 11:09:23.699153127 +0000 UTC m=+2080.517712706" Dec 11 11:09:29 crc kubenswrapper[5016]: I1211 11:09:29.047017 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-sxslc"] Dec 11 11:09:29 crc kubenswrapper[5016]: I1211 11:09:29.054960 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-sxslc"] Dec 11 11:09:29 crc kubenswrapper[5016]: I1211 11:09:29.485617 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="64fce467-b180-44c5-9d9c-e62505e87282" path="/var/lib/kubelet/pods/64fce467-b180-44c5-9d9c-e62505e87282/volumes" Dec 11 11:10:01 crc kubenswrapper[5016]: I1211 11:10:01.019206 5016 generic.go:334] "Generic (PLEG): container finished" podID="b4760482-fee8-4399-bae9-a30831f41536" containerID="5061fc159b6090717c2faec2219740763c6199b5740ca70f5d270252db48923a" exitCode=0 Dec 11 11:10:01 crc kubenswrapper[5016]: I1211 11:10:01.019305 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" event={"ID":"b4760482-fee8-4399-bae9-a30831f41536","Type":"ContainerDied","Data":"5061fc159b6090717c2faec2219740763c6199b5740ca70f5d270252db48923a"} Dec 11 11:10:02 crc kubenswrapper[5016]: I1211 11:10:02.473884 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:10:02 crc kubenswrapper[5016]: I1211 11:10:02.565299 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4760482-fee8-4399-bae9-a30831f41536-inventory\") pod \"b4760482-fee8-4399-bae9-a30831f41536\" (UID: \"b4760482-fee8-4399-bae9-a30831f41536\") " Dec 11 11:10:02 crc kubenswrapper[5016]: I1211 11:10:02.565631 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4760482-fee8-4399-bae9-a30831f41536-ssh-key\") pod \"b4760482-fee8-4399-bae9-a30831f41536\" (UID: \"b4760482-fee8-4399-bae9-a30831f41536\") " Dec 11 11:10:02 crc kubenswrapper[5016]: I1211 11:10:02.565710 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8n9zw\" (UniqueName: \"kubernetes.io/projected/b4760482-fee8-4399-bae9-a30831f41536-kube-api-access-8n9zw\") pod \"b4760482-fee8-4399-bae9-a30831f41536\" (UID: \"b4760482-fee8-4399-bae9-a30831f41536\") " Dec 11 11:10:02 crc kubenswrapper[5016]: I1211 11:10:02.575477 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4760482-fee8-4399-bae9-a30831f41536-kube-api-access-8n9zw" (OuterVolumeSpecName: "kube-api-access-8n9zw") pod "b4760482-fee8-4399-bae9-a30831f41536" (UID: "b4760482-fee8-4399-bae9-a30831f41536"). InnerVolumeSpecName "kube-api-access-8n9zw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:10:02 crc kubenswrapper[5016]: I1211 11:10:02.602286 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4760482-fee8-4399-bae9-a30831f41536-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b4760482-fee8-4399-bae9-a30831f41536" (UID: "b4760482-fee8-4399-bae9-a30831f41536"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:10:02 crc kubenswrapper[5016]: I1211 11:10:02.604926 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4760482-fee8-4399-bae9-a30831f41536-inventory" (OuterVolumeSpecName: "inventory") pod "b4760482-fee8-4399-bae9-a30831f41536" (UID: "b4760482-fee8-4399-bae9-a30831f41536"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:10:02 crc kubenswrapper[5016]: I1211 11:10:02.669137 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4760482-fee8-4399-bae9-a30831f41536-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:10:02 crc kubenswrapper[5016]: I1211 11:10:02.669199 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4760482-fee8-4399-bae9-a30831f41536-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:10:02 crc kubenswrapper[5016]: I1211 11:10:02.669212 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8n9zw\" (UniqueName: \"kubernetes.io/projected/b4760482-fee8-4399-bae9-a30831f41536-kube-api-access-8n9zw\") on node \"crc\" DevicePath \"\"" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.043142 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" event={"ID":"b4760482-fee8-4399-bae9-a30831f41536","Type":"ContainerDied","Data":"1333b56f71b625511ca791e2c47d46b5f69b56528dfbb58a9511bfcb9793acfc"} Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.043193 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1333b56f71b625511ca791e2c47d46b5f69b56528dfbb58a9511bfcb9793acfc" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.043194 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8rmhc" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.140983 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh"] Dec 11 11:10:03 crc kubenswrapper[5016]: E1211 11:10:03.141555 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4760482-fee8-4399-bae9-a30831f41536" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.141580 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4760482-fee8-4399-bae9-a30831f41536" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.141834 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4760482-fee8-4399-bae9-a30831f41536" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.142659 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.146246 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.147411 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.147761 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.149154 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.154489 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh"] Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.280991 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/baa674c6-426d-428e-af4a-dbff72b93714-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rckrh\" (UID: \"baa674c6-426d-428e-af4a-dbff72b93714\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.281092 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/baa674c6-426d-428e-af4a-dbff72b93714-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rckrh\" (UID: \"baa674c6-426d-428e-af4a-dbff72b93714\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.281163 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5wsm\" (UniqueName: \"kubernetes.io/projected/baa674c6-426d-428e-af4a-dbff72b93714-kube-api-access-k5wsm\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rckrh\" (UID: \"baa674c6-426d-428e-af4a-dbff72b93714\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.382864 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/baa674c6-426d-428e-af4a-dbff72b93714-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rckrh\" (UID: \"baa674c6-426d-428e-af4a-dbff72b93714\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.382975 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/baa674c6-426d-428e-af4a-dbff72b93714-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rckrh\" (UID: \"baa674c6-426d-428e-af4a-dbff72b93714\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.383050 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5wsm\" (UniqueName: \"kubernetes.io/projected/baa674c6-426d-428e-af4a-dbff72b93714-kube-api-access-k5wsm\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rckrh\" 
(UID: \"baa674c6-426d-428e-af4a-dbff72b93714\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.388629 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/baa674c6-426d-428e-af4a-dbff72b93714-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rckrh\" (UID: \"baa674c6-426d-428e-af4a-dbff72b93714\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.393661 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/baa674c6-426d-428e-af4a-dbff72b93714-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rckrh\" (UID: \"baa674c6-426d-428e-af4a-dbff72b93714\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.408607 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5wsm\" (UniqueName: \"kubernetes.io/projected/baa674c6-426d-428e-af4a-dbff72b93714-kube-api-access-k5wsm\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rckrh\" (UID: \"baa674c6-426d-428e-af4a-dbff72b93714\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" Dec 11 11:10:03 crc kubenswrapper[5016]: I1211 11:10:03.470354 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" Dec 11 11:10:04 crc kubenswrapper[5016]: I1211 11:10:04.026582 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh"] Dec 11 11:10:04 crc kubenswrapper[5016]: I1211 11:10:04.058096 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" event={"ID":"baa674c6-426d-428e-af4a-dbff72b93714","Type":"ContainerStarted","Data":"0fc13827a715581077f01b97f16aff755a9bd7bc6d2a2426e46cee30dd16ad48"} Dec 11 11:10:06 crc kubenswrapper[5016]: I1211 11:10:06.097851 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" event={"ID":"baa674c6-426d-428e-af4a-dbff72b93714","Type":"ContainerStarted","Data":"16faac047c439f286caddfadae6d184aa85df9a956d487db519314c5a7b8046a"} Dec 11 11:10:06 crc kubenswrapper[5016]: I1211 11:10:06.121135 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" podStartSLOduration=2.302424987 podStartE2EDuration="3.121108895s" podCreationTimestamp="2025-12-11 11:10:03 +0000 UTC" firstStartedPulling="2025-12-11 11:10:04.033317667 +0000 UTC m=+2120.851877246" lastFinishedPulling="2025-12-11 11:10:04.852001575 +0000 UTC m=+2121.670561154" observedRunningTime="2025-12-11 11:10:06.118105451 +0000 UTC m=+2122.936665030" watchObservedRunningTime="2025-12-11 11:10:06.121108895 +0000 UTC m=+2122.939668484" Dec 11 11:10:11 crc kubenswrapper[5016]: I1211 11:10:11.837898 5016 scope.go:117] "RemoveContainer" containerID="1c5233bd6e4d1368be33d9b819d30c7941f2fee59bbf3d9358cd1a05376cb9d1" Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.189979 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7nsqg"] Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.192609 5016 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.209158 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7nsqg"] Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.318127 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-catalog-content\") pod \"redhat-marketplace-7nsqg\" (UID: \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\") " pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.318608 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6j2sr\" (UniqueName: \"kubernetes.io/projected/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-kube-api-access-6j2sr\") pod \"redhat-marketplace-7nsqg\" (UID: \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\") " pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.318841 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-utilities\") pod \"redhat-marketplace-7nsqg\" (UID: \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\") " pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.420297 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6j2sr\" (UniqueName: \"kubernetes.io/projected/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-kube-api-access-6j2sr\") pod \"redhat-marketplace-7nsqg\" (UID: \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\") " pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.420415 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-utilities\") pod \"redhat-marketplace-7nsqg\" (UID: \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\") " pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.420537 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-catalog-content\") pod \"redhat-marketplace-7nsqg\" (UID: \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\") " pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.421146 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-utilities\") pod \"redhat-marketplace-7nsqg\" (UID: \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\") " pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.421167 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-catalog-content\") pod \"redhat-marketplace-7nsqg\" (UID: \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\") " pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.443191 5016 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6j2sr\" (UniqueName: \"kubernetes.io/projected/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-kube-api-access-6j2sr\") pod \"redhat-marketplace-7nsqg\" (UID: \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\") " pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:18 crc kubenswrapper[5016]: I1211 11:10:18.523043 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:19 crc kubenswrapper[5016]: I1211 11:10:19.041757 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7nsqg"] Dec 11 11:10:19 crc kubenswrapper[5016]: I1211 11:10:19.240647 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7nsqg" event={"ID":"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0","Type":"ContainerStarted","Data":"92f44b8f217e9589e2fd2391f7ffea4f924d64d03ccff6ade58f02afe7fc7830"} Dec 11 11:10:20 crc kubenswrapper[5016]: I1211 11:10:20.251352 5016 generic.go:334] "Generic (PLEG): container finished" podID="52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" containerID="b243259c86aff3e885f3b66885a1014c7dc931ef317af97ed62afc424872e5b7" exitCode=0 Dec 11 11:10:20 crc kubenswrapper[5016]: I1211 11:10:20.251461 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7nsqg" event={"ID":"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0","Type":"ContainerDied","Data":"b243259c86aff3e885f3b66885a1014c7dc931ef317af97ed62afc424872e5b7"} Dec 11 11:10:20 crc kubenswrapper[5016]: I1211 11:10:20.674808 5016 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-58f98f7fd9-rtbw4" podUID="2816d686-f2da-4306-9b07-b27dc9eb88f5" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502" Dec 11 11:10:22 crc kubenswrapper[5016]: I1211 11:10:22.276282 5016 generic.go:334] "Generic (PLEG): container finished" podID="52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" containerID="7c03526e355586c47643d1edf6eac1bb78226a320d95df3453117ccef949192f" exitCode=0 Dec 11 11:10:22 crc kubenswrapper[5016]: I1211 11:10:22.276440 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7nsqg" event={"ID":"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0","Type":"ContainerDied","Data":"7c03526e355586c47643d1edf6eac1bb78226a320d95df3453117ccef949192f"} Dec 11 11:10:22 crc kubenswrapper[5016]: I1211 11:10:22.278686 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 11:10:24 crc kubenswrapper[5016]: I1211 11:10:24.302100 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7nsqg" event={"ID":"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0","Type":"ContainerStarted","Data":"9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c"} Dec 11 11:10:24 crc kubenswrapper[5016]: I1211 11:10:24.331624 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7nsqg" podStartSLOduration=3.495472817 podStartE2EDuration="6.331596937s" podCreationTimestamp="2025-12-11 11:10:18 +0000 UTC" firstStartedPulling="2025-12-11 11:10:20.253265287 +0000 UTC m=+2137.071824866" lastFinishedPulling="2025-12-11 11:10:23.089389407 +0000 UTC m=+2139.907948986" observedRunningTime="2025-12-11 11:10:24.322030032 +0000 UTC m=+2141.140589621" watchObservedRunningTime="2025-12-11 
11:10:24.331596937 +0000 UTC m=+2141.150156526" Dec 11 11:10:28 crc kubenswrapper[5016]: I1211 11:10:28.523611 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:28 crc kubenswrapper[5016]: I1211 11:10:28.524404 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:28 crc kubenswrapper[5016]: I1211 11:10:28.577733 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:29 crc kubenswrapper[5016]: I1211 11:10:29.395875 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:29 crc kubenswrapper[5016]: I1211 11:10:29.448734 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7nsqg"] Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.246162 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dxf2r"] Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.248871 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dxf2r" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.258543 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dxf2r"] Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.308590 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-catalog-content\") pod \"redhat-operators-dxf2r\" (UID: \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\") " pod="openshift-marketplace/redhat-operators-dxf2r" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.308682 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-utilities\") pod \"redhat-operators-dxf2r\" (UID: \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\") " pod="openshift-marketplace/redhat-operators-dxf2r" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.308846 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78652\" (UniqueName: \"kubernetes.io/projected/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-kube-api-access-78652\") pod \"redhat-operators-dxf2r\" (UID: \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\") " pod="openshift-marketplace/redhat-operators-dxf2r" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.378256 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7nsqg" podUID="52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" containerName="registry-server" containerID="cri-o://9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c" gracePeriod=2 Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.411578 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78652\" (UniqueName: \"kubernetes.io/projected/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-kube-api-access-78652\") pod \"redhat-operators-dxf2r\" (UID: \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\") " pod="openshift-marketplace/redhat-operators-dxf2r" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 
11:10:31.411768 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-catalog-content\") pod \"redhat-operators-dxf2r\" (UID: \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\") " pod="openshift-marketplace/redhat-operators-dxf2r" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.411793 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-utilities\") pod \"redhat-operators-dxf2r\" (UID: \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\") " pod="openshift-marketplace/redhat-operators-dxf2r" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.412628 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-catalog-content\") pod \"redhat-operators-dxf2r\" (UID: \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\") " pod="openshift-marketplace/redhat-operators-dxf2r" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.412833 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-utilities\") pod \"redhat-operators-dxf2r\" (UID: \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\") " pod="openshift-marketplace/redhat-operators-dxf2r" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.433686 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78652\" (UniqueName: \"kubernetes.io/projected/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-kube-api-access-78652\") pod \"redhat-operators-dxf2r\" (UID: \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\") " pod="openshift-marketplace/redhat-operators-dxf2r" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.611926 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dxf2r" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.872653 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.926172 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6j2sr\" (UniqueName: \"kubernetes.io/projected/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-kube-api-access-6j2sr\") pod \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\" (UID: \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\") " Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.926481 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-catalog-content\") pod \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\" (UID: \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\") " Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.926608 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-utilities\") pod \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\" (UID: \"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0\") " Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.928186 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-utilities" (OuterVolumeSpecName: "utilities") pod "52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" (UID: "52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.946688 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-kube-api-access-6j2sr" (OuterVolumeSpecName: "kube-api-access-6j2sr") pod "52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" (UID: "52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0"). InnerVolumeSpecName "kube-api-access-6j2sr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:10:31 crc kubenswrapper[5016]: I1211 11:10:31.953531 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" (UID: "52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.029655 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.029688 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6j2sr\" (UniqueName: \"kubernetes.io/projected/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-kube-api-access-6j2sr\") on node \"crc\" DevicePath \"\"" Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.029701 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.233572 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dxf2r"] Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.390914 5016 generic.go:334] "Generic (PLEG): container finished" podID="52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" containerID="9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c" exitCode=0 Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.391004 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7nsqg" Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.390991 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7nsqg" event={"ID":"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0","Type":"ContainerDied","Data":"9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c"} Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.391068 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7nsqg" event={"ID":"52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0","Type":"ContainerDied","Data":"92f44b8f217e9589e2fd2391f7ffea4f924d64d03ccff6ade58f02afe7fc7830"} Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.391090 5016 scope.go:117] "RemoveContainer" containerID="9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c" Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.394449 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dxf2r" event={"ID":"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f","Type":"ContainerStarted","Data":"bad0c6f9685581bc8dd9bfabebe4d10b83f92c8c7bd5dc306d3c00e13236b63f"} Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.414702 5016 scope.go:117] "RemoveContainer" containerID="7c03526e355586c47643d1edf6eac1bb78226a320d95df3453117ccef949192f" Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.437034 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7nsqg"] Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.458278 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7nsqg"] Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.468155 5016 scope.go:117] "RemoveContainer" containerID="b243259c86aff3e885f3b66885a1014c7dc931ef317af97ed62afc424872e5b7" Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.582414 5016 scope.go:117] "RemoveContainer" containerID="9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c" Dec 11 11:10:32 crc kubenswrapper[5016]: 
E1211 11:10:32.583172 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c\": container with ID starting with 9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c not found: ID does not exist" containerID="9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c"
Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.583264 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c"} err="failed to get container status \"9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c\": rpc error: code = NotFound desc = could not find container \"9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c\": container with ID starting with 9e28659cf0ccee32030d99e61572e92e079118f12697277c9d0e6c740b5fb11c not found: ID does not exist"
Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.583308 5016 scope.go:117] "RemoveContainer" containerID="7c03526e355586c47643d1edf6eac1bb78226a320d95df3453117ccef949192f"
Dec 11 11:10:32 crc kubenswrapper[5016]: E1211 11:10:32.583770 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c03526e355586c47643d1edf6eac1bb78226a320d95df3453117ccef949192f\": container with ID starting with 7c03526e355586c47643d1edf6eac1bb78226a320d95df3453117ccef949192f not found: ID does not exist" containerID="7c03526e355586c47643d1edf6eac1bb78226a320d95df3453117ccef949192f"
Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.583818 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c03526e355586c47643d1edf6eac1bb78226a320d95df3453117ccef949192f"} err="failed to get container status \"7c03526e355586c47643d1edf6eac1bb78226a320d95df3453117ccef949192f\": rpc error: code = NotFound desc = could not find container \"7c03526e355586c47643d1edf6eac1bb78226a320d95df3453117ccef949192f\": container with ID starting with 7c03526e355586c47643d1edf6eac1bb78226a320d95df3453117ccef949192f not found: ID does not exist"
Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.583855 5016 scope.go:117] "RemoveContainer" containerID="b243259c86aff3e885f3b66885a1014c7dc931ef317af97ed62afc424872e5b7"
Dec 11 11:10:32 crc kubenswrapper[5016]: E1211 11:10:32.584352 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b243259c86aff3e885f3b66885a1014c7dc931ef317af97ed62afc424872e5b7\": container with ID starting with b243259c86aff3e885f3b66885a1014c7dc931ef317af97ed62afc424872e5b7 not found: ID does not exist" containerID="b243259c86aff3e885f3b66885a1014c7dc931ef317af97ed62afc424872e5b7"
Dec 11 11:10:32 crc kubenswrapper[5016]: I1211 11:10:32.584375 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b243259c86aff3e885f3b66885a1014c7dc931ef317af97ed62afc424872e5b7"} err="failed to get container status \"b243259c86aff3e885f3b66885a1014c7dc931ef317af97ed62afc424872e5b7\": rpc error: code = NotFound desc = could not find container \"b243259c86aff3e885f3b66885a1014c7dc931ef317af97ed62afc424872e5b7\": container with ID starting with b243259c86aff3e885f3b66885a1014c7dc931ef317af97ed62afc424872e5b7 not found: ID does not exist"
Dec 11 11:10:33 crc kubenswrapper[5016]: I1211 11:10:33.412408 5016 generic.go:334] "Generic (PLEG): container finished" podID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" containerID="a385aab39a316201779ec040b3def7394fde015586018ec7b172b068095e0ed6" exitCode=0
Dec 11 11:10:33 crc kubenswrapper[5016]: I1211 11:10:33.412507 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dxf2r" event={"ID":"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f","Type":"ContainerDied","Data":"a385aab39a316201779ec040b3def7394fde015586018ec7b172b068095e0ed6"}
Dec 11 11:10:33 crc kubenswrapper[5016]: I1211 11:10:33.487666 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" path="/var/lib/kubelet/pods/52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0/volumes"
Dec 11 11:10:35 crc kubenswrapper[5016]: I1211 11:10:35.447502 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dxf2r" event={"ID":"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f","Type":"ContainerStarted","Data":"0a47d1a0730a2bece75779c757b728be20b380cf31e181ccf2bfac7cd9c68fb4"}
Dec 11 11:10:37 crc kubenswrapper[5016]: I1211 11:10:37.466826 5016 generic.go:334] "Generic (PLEG): container finished" podID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" containerID="0a47d1a0730a2bece75779c757b728be20b380cf31e181ccf2bfac7cd9c68fb4" exitCode=0
Dec 11 11:10:37 crc kubenswrapper[5016]: I1211 11:10:37.466915 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dxf2r" event={"ID":"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f","Type":"ContainerDied","Data":"0a47d1a0730a2bece75779c757b728be20b380cf31e181ccf2bfac7cd9c68fb4"}
Dec 11 11:10:39 crc kubenswrapper[5016]: I1211 11:10:39.488527 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dxf2r" event={"ID":"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f","Type":"ContainerStarted","Data":"4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401"}
Dec 11 11:10:39 crc kubenswrapper[5016]: I1211 11:10:39.510831 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dxf2r" podStartSLOduration=2.843100838 podStartE2EDuration="8.510800717s" podCreationTimestamp="2025-12-11 11:10:31 +0000 UTC" firstStartedPulling="2025-12-11 11:10:33.414683316 +0000 UTC m=+2150.233242895" lastFinishedPulling="2025-12-11 11:10:39.082383195 +0000 UTC m=+2155.900942774" observedRunningTime="2025-12-11 11:10:39.509790612 +0000 UTC m=+2156.328350211" watchObservedRunningTime="2025-12-11 11:10:39.510800717 +0000 UTC m=+2156.329360306"
Dec 11 11:10:41 crc kubenswrapper[5016]: I1211 11:10:41.612909 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dxf2r"
Dec 11 11:10:41 crc kubenswrapper[5016]: I1211 11:10:41.614212 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dxf2r"
Dec 11 11:10:42 crc kubenswrapper[5016]: I1211 11:10:42.676586 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dxf2r" podUID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" containerName="registry-server" probeResult="failure" output=<
Dec 11 11:10:42 crc kubenswrapper[5016]: timeout: failed to connect service ":50051" within 1s
Dec 11 11:10:42 crc kubenswrapper[5016]: >
Dec 11 11:10:42 crc kubenswrapper[5016]: I1211 11:10:42.933140 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 11:10:42 crc kubenswrapper[5016]: I1211 11:10:42.933208 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 11:10:51 crc kubenswrapper[5016]: I1211 11:10:51.661223 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dxf2r"
Dec 11 11:10:51 crc kubenswrapper[5016]: I1211 11:10:51.708303 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dxf2r"
Dec 11 11:10:51 crc kubenswrapper[5016]: I1211 11:10:51.900510 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dxf2r"]
Dec 11 11:10:53 crc kubenswrapper[5016]: I1211 11:10:53.636728 5016 generic.go:334] "Generic (PLEG): container finished" podID="baa674c6-426d-428e-af4a-dbff72b93714" containerID="16faac047c439f286caddfadae6d184aa85df9a956d487db519314c5a7b8046a" exitCode=0
Dec 11 11:10:53 crc kubenswrapper[5016]: I1211 11:10:53.636826 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" event={"ID":"baa674c6-426d-428e-af4a-dbff72b93714","Type":"ContainerDied","Data":"16faac047c439f286caddfadae6d184aa85df9a956d487db519314c5a7b8046a"}
Dec 11 11:10:53 crc kubenswrapper[5016]: I1211 11:10:53.637349 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dxf2r" podUID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" containerName="registry-server" containerID="cri-o://4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401" gracePeriod=2
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.637524 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dxf2r"
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.650012 5016 generic.go:334] "Generic (PLEG): container finished" podID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" containerID="4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401" exitCode=0
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.650241 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dxf2r"
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.650718 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dxf2r" event={"ID":"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f","Type":"ContainerDied","Data":"4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401"}
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.650746 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dxf2r" event={"ID":"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f","Type":"ContainerDied","Data":"bad0c6f9685581bc8dd9bfabebe4d10b83f92c8c7bd5dc306d3c00e13236b63f"}
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.650762 5016 scope.go:117] "RemoveContainer" containerID="4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401"
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.683582 5016 scope.go:117] "RemoveContainer" containerID="0a47d1a0730a2bece75779c757b728be20b380cf31e181ccf2bfac7cd9c68fb4"
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.737024 5016 scope.go:117] "RemoveContainer" containerID="a385aab39a316201779ec040b3def7394fde015586018ec7b172b068095e0ed6"
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.760151 5016 scope.go:117] "RemoveContainer" containerID="4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401"
Dec 11 11:10:54 crc kubenswrapper[5016]: E1211 11:10:54.760873 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401\": container with ID starting with 4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401 not found: ID does not exist" containerID="4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401"
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.760907 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401"} err="failed to get container status \"4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401\": rpc error: code = NotFound desc = could not find container \"4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401\": container with ID starting with 4f50927c074dc8453b752b99c1aa187138dfe3428a6c684b1f2fe3be44113401 not found: ID does not exist"
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.760930 5016 scope.go:117] "RemoveContainer" containerID="0a47d1a0730a2bece75779c757b728be20b380cf31e181ccf2bfac7cd9c68fb4"
Dec 11 11:10:54 crc kubenswrapper[5016]: E1211 11:10:54.761250 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a47d1a0730a2bece75779c757b728be20b380cf31e181ccf2bfac7cd9c68fb4\": container with ID starting with 0a47d1a0730a2bece75779c757b728be20b380cf31e181ccf2bfac7cd9c68fb4 not found: ID does not exist" containerID="0a47d1a0730a2bece75779c757b728be20b380cf31e181ccf2bfac7cd9c68fb4"
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.761286 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a47d1a0730a2bece75779c757b728be20b380cf31e181ccf2bfac7cd9c68fb4"} err="failed to get container status \"0a47d1a0730a2bece75779c757b728be20b380cf31e181ccf2bfac7cd9c68fb4\": rpc error: code = NotFound desc = could not find container \"0a47d1a0730a2bece75779c757b728be20b380cf31e181ccf2bfac7cd9c68fb4\": container with ID starting with 0a47d1a0730a2bece75779c757b728be20b380cf31e181ccf2bfac7cd9c68fb4 not found: ID does not exist"
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.761308 5016 scope.go:117] "RemoveContainer" containerID="a385aab39a316201779ec040b3def7394fde015586018ec7b172b068095e0ed6"
Dec 11 11:10:54 crc kubenswrapper[5016]: E1211 11:10:54.761609 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a385aab39a316201779ec040b3def7394fde015586018ec7b172b068095e0ed6\": container with ID starting with a385aab39a316201779ec040b3def7394fde015586018ec7b172b068095e0ed6 not found: ID does not exist" containerID="a385aab39a316201779ec040b3def7394fde015586018ec7b172b068095e0ed6"
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.761640 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a385aab39a316201779ec040b3def7394fde015586018ec7b172b068095e0ed6"} err="failed to get container status \"a385aab39a316201779ec040b3def7394fde015586018ec7b172b068095e0ed6\": rpc error: code = NotFound desc = could not find container \"a385aab39a316201779ec040b3def7394fde015586018ec7b172b068095e0ed6\": container with ID starting with a385aab39a316201779ec040b3def7394fde015586018ec7b172b068095e0ed6 not found: ID does not exist"
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.826700 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78652\" (UniqueName: \"kubernetes.io/projected/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-kube-api-access-78652\") pod \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\" (UID: \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\") "
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.826785 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-catalog-content\") pod \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\" (UID: \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\") "
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.826810 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-utilities\") pod \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\" (UID: \"1eefcff2-4d4e-4c74-a84d-1ade68d8d81f\") "
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.827887 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-utilities" (OuterVolumeSpecName: "utilities") pod "1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" (UID: "1eefcff2-4d4e-4c74-a84d-1ade68d8d81f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.832843 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-kube-api-access-78652" (OuterVolumeSpecName: "kube-api-access-78652") pod "1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" (UID: "1eefcff2-4d4e-4c74-a84d-1ade68d8d81f"). InnerVolumeSpecName "kube-api-access-78652". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.929017 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78652\" (UniqueName: \"kubernetes.io/projected/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-kube-api-access-78652\") on node \"crc\" DevicePath \"\""
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.929472 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 11:10:54 crc kubenswrapper[5016]: I1211 11:10:54.977845 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" (UID: "1eefcff2-4d4e-4c74-a84d-1ade68d8d81f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.031334 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.110067 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.234827 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5wsm\" (UniqueName: \"kubernetes.io/projected/baa674c6-426d-428e-af4a-dbff72b93714-kube-api-access-k5wsm\") pod \"baa674c6-426d-428e-af4a-dbff72b93714\" (UID: \"baa674c6-426d-428e-af4a-dbff72b93714\") "
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.234980 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/baa674c6-426d-428e-af4a-dbff72b93714-ssh-key\") pod \"baa674c6-426d-428e-af4a-dbff72b93714\" (UID: \"baa674c6-426d-428e-af4a-dbff72b93714\") "
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.235121 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/baa674c6-426d-428e-af4a-dbff72b93714-inventory\") pod \"baa674c6-426d-428e-af4a-dbff72b93714\" (UID: \"baa674c6-426d-428e-af4a-dbff72b93714\") "
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.240452 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/baa674c6-426d-428e-af4a-dbff72b93714-kube-api-access-k5wsm" (OuterVolumeSpecName: "kube-api-access-k5wsm") pod "baa674c6-426d-428e-af4a-dbff72b93714" (UID: "baa674c6-426d-428e-af4a-dbff72b93714"). InnerVolumeSpecName "kube-api-access-k5wsm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.265547 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/baa674c6-426d-428e-af4a-dbff72b93714-inventory" (OuterVolumeSpecName: "inventory") pod "baa674c6-426d-428e-af4a-dbff72b93714" (UID: "baa674c6-426d-428e-af4a-dbff72b93714"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.267341 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/baa674c6-426d-428e-af4a-dbff72b93714-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "baa674c6-426d-428e-af4a-dbff72b93714" (UID: "baa674c6-426d-428e-af4a-dbff72b93714"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.314558 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dxf2r"]
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.325436 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dxf2r"]
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.338080 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5wsm\" (UniqueName: \"kubernetes.io/projected/baa674c6-426d-428e-af4a-dbff72b93714-kube-api-access-k5wsm\") on node \"crc\" DevicePath \"\""
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.338149 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/baa674c6-426d-428e-af4a-dbff72b93714-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.338162 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/baa674c6-426d-428e-af4a-dbff72b93714-inventory\") on node \"crc\" DevicePath \"\""
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.488610 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" path="/var/lib/kubelet/pods/1eefcff2-4d4e-4c74-a84d-1ade68d8d81f/volumes"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.672288 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh" event={"ID":"baa674c6-426d-428e-af4a-dbff72b93714","Type":"ContainerDied","Data":"0fc13827a715581077f01b97f16aff755a9bd7bc6d2a2426e46cee30dd16ad48"}
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.672607 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0fc13827a715581077f01b97f16aff755a9bd7bc6d2a2426e46cee30dd16ad48"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.672523 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rckrh"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.787835 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-lz69w"]
Dec 11 11:10:55 crc kubenswrapper[5016]: E1211 11:10:55.792796 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" containerName="registry-server"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.792832 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" containerName="registry-server"
Dec 11 11:10:55 crc kubenswrapper[5016]: E1211 11:10:55.792871 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" containerName="extract-utilities"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.792881 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" containerName="extract-utilities"
Dec 11 11:10:55 crc kubenswrapper[5016]: E1211 11:10:55.792900 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" containerName="registry-server"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.792908 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" containerName="registry-server"
Dec 11 11:10:55 crc kubenswrapper[5016]: E1211 11:10:55.793028 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" containerName="extract-content"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.793041 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" containerName="extract-content"
Dec 11 11:10:55 crc kubenswrapper[5016]: E1211 11:10:55.793059 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" containerName="extract-utilities"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.793065 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" containerName="extract-utilities"
Dec 11 11:10:55 crc kubenswrapper[5016]: E1211 11:10:55.793083 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baa674c6-426d-428e-af4a-dbff72b93714" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.793094 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="baa674c6-426d-428e-af4a-dbff72b93714" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Dec 11 11:10:55 crc kubenswrapper[5016]: E1211 11:10:55.793108 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" containerName="extract-content"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.793116 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" containerName="extract-content"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.793405 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="52cc7f97-30b7-430e-9e4b-c31b0cbb8cb0" containerName="registry-server"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.793432 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="baa674c6-426d-428e-af4a-dbff72b93714" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.793449 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1eefcff2-4d4e-4c74-a84d-1ade68d8d81f" containerName="registry-server"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.794675 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.802473 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.802585 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.802768 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.802835 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.822534 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-lz69w"]
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.950913 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfcd7\" (UniqueName: \"kubernetes.io/projected/79db9d17-e0eb-40f5-88ca-5f222544e2b1-kube-api-access-wfcd7\") pod \"ssh-known-hosts-edpm-deployment-lz69w\" (UID: \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\") " pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.951908 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/79db9d17-e0eb-40f5-88ca-5f222544e2b1-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-lz69w\" (UID: \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\") " pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:10:55 crc kubenswrapper[5016]: I1211 11:10:55.952070 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/79db9d17-e0eb-40f5-88ca-5f222544e2b1-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-lz69w\" (UID: \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\") " pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:10:56 crc kubenswrapper[5016]: I1211 11:10:56.055377 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfcd7\" (UniqueName: \"kubernetes.io/projected/79db9d17-e0eb-40f5-88ca-5f222544e2b1-kube-api-access-wfcd7\") pod \"ssh-known-hosts-edpm-deployment-lz69w\" (UID: \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\") " pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:10:56 crc kubenswrapper[5016]: I1211 11:10:56.055511 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/79db9d17-e0eb-40f5-88ca-5f222544e2b1-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-lz69w\" (UID: \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\") " pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:10:56 crc kubenswrapper[5016]: I1211 11:10:56.055623 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/79db9d17-e0eb-40f5-88ca-5f222544e2b1-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-lz69w\" (UID: \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\") " pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:10:56 crc kubenswrapper[5016]: I1211 11:10:56.062554 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/79db9d17-e0eb-40f5-88ca-5f222544e2b1-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-lz69w\" (UID: \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\") " pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:10:56 crc kubenswrapper[5016]: I1211 11:10:56.068011 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/79db9d17-e0eb-40f5-88ca-5f222544e2b1-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-lz69w\" (UID: \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\") " pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:10:56 crc kubenswrapper[5016]: I1211 11:10:56.075366 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfcd7\" (UniqueName: \"kubernetes.io/projected/79db9d17-e0eb-40f5-88ca-5f222544e2b1-kube-api-access-wfcd7\") pod \"ssh-known-hosts-edpm-deployment-lz69w\" (UID: \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\") " pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:10:56 crc kubenswrapper[5016]: I1211 11:10:56.132045 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:10:56 crc kubenswrapper[5016]: I1211 11:10:56.686537 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-lz69w"]
Dec 11 11:10:57 crc kubenswrapper[5016]: I1211 11:10:57.698735 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-lz69w" event={"ID":"79db9d17-e0eb-40f5-88ca-5f222544e2b1","Type":"ContainerStarted","Data":"1877cb93220c748f9d9a172eef113d894fcf70098e8ea01ad0c8035e052c1b99"}
Dec 11 11:10:58 crc kubenswrapper[5016]: I1211 11:10:58.712928 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-lz69w" event={"ID":"79db9d17-e0eb-40f5-88ca-5f222544e2b1","Type":"ContainerStarted","Data":"12eb95455c04048ec748bef549976b22c903af9e5eb162b4296ba2ce407af6c1"}
Dec 11 11:10:58 crc kubenswrapper[5016]: I1211 11:10:58.735143 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-lz69w" podStartSLOduration=2.925381991 podStartE2EDuration="3.735117101s" podCreationTimestamp="2025-12-11 11:10:55 +0000 UTC" firstStartedPulling="2025-12-11 11:10:56.696822077 +0000 UTC m=+2173.515381656" lastFinishedPulling="2025-12-11 11:10:57.506557187 +0000 UTC m=+2174.325116766" observedRunningTime="2025-12-11 11:10:58.734429555 +0000 UTC m=+2175.552989134" watchObservedRunningTime="2025-12-11 11:10:58.735117101 +0000 UTC m=+2175.553676680"
Dec 11 11:11:04 crc kubenswrapper[5016]: I1211 11:11:04.767220 5016 generic.go:334] "Generic (PLEG): container finished" podID="79db9d17-e0eb-40f5-88ca-5f222544e2b1" containerID="12eb95455c04048ec748bef549976b22c903af9e5eb162b4296ba2ce407af6c1" exitCode=0
Dec 11 11:11:04 crc kubenswrapper[5016]: I1211 11:11:04.767323 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-lz69w" event={"ID":"79db9d17-e0eb-40f5-88ca-5f222544e2b1","Type":"ContainerDied","Data":"12eb95455c04048ec748bef549976b22c903af9e5eb162b4296ba2ce407af6c1"}
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.302246 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.412543 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/79db9d17-e0eb-40f5-88ca-5f222544e2b1-inventory-0\") pod \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\" (UID: \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\") "
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.413180 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfcd7\" (UniqueName: \"kubernetes.io/projected/79db9d17-e0eb-40f5-88ca-5f222544e2b1-kube-api-access-wfcd7\") pod \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\" (UID: \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\") "
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.413275 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/79db9d17-e0eb-40f5-88ca-5f222544e2b1-ssh-key-openstack-edpm-ipam\") pod \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\" (UID: \"79db9d17-e0eb-40f5-88ca-5f222544e2b1\") "
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.425222 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79db9d17-e0eb-40f5-88ca-5f222544e2b1-kube-api-access-wfcd7" (OuterVolumeSpecName: "kube-api-access-wfcd7") pod "79db9d17-e0eb-40f5-88ca-5f222544e2b1" (UID: "79db9d17-e0eb-40f5-88ca-5f222544e2b1"). InnerVolumeSpecName "kube-api-access-wfcd7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.463067 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79db9d17-e0eb-40f5-88ca-5f222544e2b1-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "79db9d17-e0eb-40f5-88ca-5f222544e2b1" (UID: "79db9d17-e0eb-40f5-88ca-5f222544e2b1"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.470102 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79db9d17-e0eb-40f5-88ca-5f222544e2b1-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "79db9d17-e0eb-40f5-88ca-5f222544e2b1" (UID: "79db9d17-e0eb-40f5-88ca-5f222544e2b1"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.516735 5016 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/79db9d17-e0eb-40f5-88ca-5f222544e2b1-inventory-0\") on node \"crc\" DevicePath \"\""
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.516788 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfcd7\" (UniqueName: \"kubernetes.io/projected/79db9d17-e0eb-40f5-88ca-5f222544e2b1-kube-api-access-wfcd7\") on node \"crc\" DevicePath \"\""
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.516806 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/79db9d17-e0eb-40f5-88ca-5f222544e2b1-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.791910 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-lz69w" event={"ID":"79db9d17-e0eb-40f5-88ca-5f222544e2b1","Type":"ContainerDied","Data":"1877cb93220c748f9d9a172eef113d894fcf70098e8ea01ad0c8035e052c1b99"}
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.792009 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1877cb93220c748f9d9a172eef113d894fcf70098e8ea01ad0c8035e052c1b99"
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.792343 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-lz69w"
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.878824 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"]
Dec 11 11:11:06 crc kubenswrapper[5016]: E1211 11:11:06.879387 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79db9d17-e0eb-40f5-88ca-5f222544e2b1" containerName="ssh-known-hosts-edpm-deployment"
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.879409 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="79db9d17-e0eb-40f5-88ca-5f222544e2b1" containerName="ssh-known-hosts-edpm-deployment"
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.879608 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="79db9d17-e0eb-40f5-88ca-5f222544e2b1" containerName="ssh-known-hosts-edpm-deployment"
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.880402 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.883492 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.884510 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.884528 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8"
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.884769 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 11 11:11:06 crc kubenswrapper[5016]: I1211 11:11:06.894171 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"]
Dec 11 11:11:07 crc kubenswrapper[5016]: I1211 11:11:07.032901 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-px79s\" (UniqueName: \"kubernetes.io/projected/f43b972e-9584-45ea-a540-cc2facfb7ec5-kube-api-access-px79s\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-94jbb\" (UID: \"f43b972e-9584-45ea-a540-cc2facfb7ec5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:07 crc kubenswrapper[5016]: I1211 11:11:07.033143 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f43b972e-9584-45ea-a540-cc2facfb7ec5-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-94jbb\" (UID: \"f43b972e-9584-45ea-a540-cc2facfb7ec5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:07 crc kubenswrapper[5016]: I1211 11:11:07.033179 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f43b972e-9584-45ea-a540-cc2facfb7ec5-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-94jbb\" (UID: \"f43b972e-9584-45ea-a540-cc2facfb7ec5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:07 crc kubenswrapper[5016]: I1211 11:11:07.136717 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-px79s\" (UniqueName: \"kubernetes.io/projected/f43b972e-9584-45ea-a540-cc2facfb7ec5-kube-api-access-px79s\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-94jbb\" (UID: \"f43b972e-9584-45ea-a540-cc2facfb7ec5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:07 crc kubenswrapper[5016]: I1211 11:11:07.136971 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f43b972e-9584-45ea-a540-cc2facfb7ec5-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-94jbb\" (UID: \"f43b972e-9584-45ea-a540-cc2facfb7ec5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:07 crc kubenswrapper[5016]: I1211 11:11:07.137008 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f43b972e-9584-45ea-a540-cc2facfb7ec5-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-94jbb\" (UID: \"f43b972e-9584-45ea-a540-cc2facfb7ec5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:07 crc kubenswrapper[5016]: I1211 11:11:07.142721 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f43b972e-9584-45ea-a540-cc2facfb7ec5-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-94jbb\" (UID: \"f43b972e-9584-45ea-a540-cc2facfb7ec5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:07 crc kubenswrapper[5016]: I1211 11:11:07.143786 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f43b972e-9584-45ea-a540-cc2facfb7ec5-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-94jbb\" (UID: \"f43b972e-9584-45ea-a540-cc2facfb7ec5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:07 crc kubenswrapper[5016]: I1211 11:11:07.158407 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-px79s\" (UniqueName: \"kubernetes.io/projected/f43b972e-9584-45ea-a540-cc2facfb7ec5-kube-api-access-px79s\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-94jbb\" (UID: \"f43b972e-9584-45ea-a540-cc2facfb7ec5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:07 crc kubenswrapper[5016]: I1211 11:11:07.197957 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:07 crc kubenswrapper[5016]: I1211 11:11:07.741484 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"]
Dec 11 11:11:07 crc kubenswrapper[5016]: I1211 11:11:07.801709 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb" event={"ID":"f43b972e-9584-45ea-a540-cc2facfb7ec5","Type":"ContainerStarted","Data":"2f5003d81d82a316f8f5a1b0f54ddee9660417aa9d3f5806ec9933cfe8653626"}
Dec 11 11:11:08 crc kubenswrapper[5016]: I1211 11:11:08.814328 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb" event={"ID":"f43b972e-9584-45ea-a540-cc2facfb7ec5","Type":"ContainerStarted","Data":"76e7aae1bb494ec816f09bf82c15cf5411920d9f8e6953dc56759cbd75846d32"}
Dec 11 11:11:08 crc kubenswrapper[5016]: I1211 11:11:08.845919 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb" podStartSLOduration=2.331081562 podStartE2EDuration="2.845893471s" podCreationTimestamp="2025-12-11 11:11:06 +0000 UTC" firstStartedPulling="2025-12-11 11:11:07.75006818 +0000 UTC m=+2184.568627759" lastFinishedPulling="2025-12-11 11:11:08.264880089 +0000 UTC m=+2185.083439668" observedRunningTime="2025-12-11 11:11:08.833763765 +0000 UTC m=+2185.652323344" watchObservedRunningTime="2025-12-11 11:11:08.845893471 +0000 UTC m=+2185.664453060"
Dec 11 11:11:12 crc kubenswrapper[5016]: I1211 11:11:12.932425 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 11:11:12 crc kubenswrapper[5016]: I1211 11:11:12.933154 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 11:11:16 crc kubenswrapper[5016]: I1211 11:11:16.915996 5016 generic.go:334] "Generic (PLEG): container finished" podID="f43b972e-9584-45ea-a540-cc2facfb7ec5" containerID="76e7aae1bb494ec816f09bf82c15cf5411920d9f8e6953dc56759cbd75846d32" exitCode=0
Dec 11 11:11:16 crc kubenswrapper[5016]: I1211 11:11:16.916099 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb" event={"ID":"f43b972e-9584-45ea-a540-cc2facfb7ec5","Type":"ContainerDied","Data":"76e7aae1bb494ec816f09bf82c15cf5411920d9f8e6953dc56759cbd75846d32"}
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.371965 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.516302 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-px79s\" (UniqueName: \"kubernetes.io/projected/f43b972e-9584-45ea-a540-cc2facfb7ec5-kube-api-access-px79s\") pod \"f43b972e-9584-45ea-a540-cc2facfb7ec5\" (UID: \"f43b972e-9584-45ea-a540-cc2facfb7ec5\") "
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.516477 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f43b972e-9584-45ea-a540-cc2facfb7ec5-ssh-key\") pod \"f43b972e-9584-45ea-a540-cc2facfb7ec5\" (UID: \"f43b972e-9584-45ea-a540-cc2facfb7ec5\") "
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.516502 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f43b972e-9584-45ea-a540-cc2facfb7ec5-inventory\") pod \"f43b972e-9584-45ea-a540-cc2facfb7ec5\" (UID: \"f43b972e-9584-45ea-a540-cc2facfb7ec5\") "
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.522130 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f43b972e-9584-45ea-a540-cc2facfb7ec5-kube-api-access-px79s" (OuterVolumeSpecName: "kube-api-access-px79s") pod "f43b972e-9584-45ea-a540-cc2facfb7ec5" (UID: "f43b972e-9584-45ea-a540-cc2facfb7ec5"). InnerVolumeSpecName "kube-api-access-px79s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.545314 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f43b972e-9584-45ea-a540-cc2facfb7ec5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f43b972e-9584-45ea-a540-cc2facfb7ec5" (UID: "f43b972e-9584-45ea-a540-cc2facfb7ec5"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.555623 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f43b972e-9584-45ea-a540-cc2facfb7ec5-inventory" (OuterVolumeSpecName: "inventory") pod "f43b972e-9584-45ea-a540-cc2facfb7ec5" (UID: "f43b972e-9584-45ea-a540-cc2facfb7ec5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.619002 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-px79s\" (UniqueName: \"kubernetes.io/projected/f43b972e-9584-45ea-a540-cc2facfb7ec5-kube-api-access-px79s\") on node \"crc\" DevicePath \"\""
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.619054 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f43b972e-9584-45ea-a540-cc2facfb7ec5-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.619066 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f43b972e-9584-45ea-a540-cc2facfb7ec5-inventory\") on node \"crc\" DevicePath \"\""
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.947401 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb" event={"ID":"f43b972e-9584-45ea-a540-cc2facfb7ec5","Type":"ContainerDied","Data":"2f5003d81d82a316f8f5a1b0f54ddee9660417aa9d3f5806ec9933cfe8653626"}
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.947454 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f5003d81d82a316f8f5a1b0f54ddee9660417aa9d3f5806ec9933cfe8653626"
Dec 11 11:11:18 crc kubenswrapper[5016]: I1211 11:11:18.947520 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-94jbb"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.095719 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"]
Dec 11 11:11:19 crc kubenswrapper[5016]: E1211 11:11:19.096356 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f43b972e-9584-45ea-a540-cc2facfb7ec5" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.096385 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f43b972e-9584-45ea-a540-cc2facfb7ec5" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.096658 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f43b972e-9584-45ea-a540-cc2facfb7ec5" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.097712 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.102345 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.102548 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.121708 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.121861 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.140522 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"]
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.232242 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1965e849-9439-404f-96f1-d5ced3154038-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8\" (UID: \"1965e849-9439-404f-96f1-d5ced3154038\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.232337 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zfm6\" (UniqueName: \"kubernetes.io/projected/1965e849-9439-404f-96f1-d5ced3154038-kube-api-access-4zfm6\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8\" (UID: \"1965e849-9439-404f-96f1-d5ced3154038\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.232372 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1965e849-9439-404f-96f1-d5ced3154038-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8\" (UID: \"1965e849-9439-404f-96f1-d5ced3154038\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.334128 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1965e849-9439-404f-96f1-d5ced3154038-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8\" (UID: \"1965e849-9439-404f-96f1-d5ced3154038\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.334317 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1965e849-9439-404f-96f1-d5ced3154038-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8\" (UID: \"1965e849-9439-404f-96f1-d5ced3154038\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.334376 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zfm6\" (UniqueName: \"kubernetes.io/projected/1965e849-9439-404f-96f1-d5ced3154038-kube-api-access-4zfm6\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8\" (UID: \"1965e849-9439-404f-96f1-d5ced3154038\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.338784 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1965e849-9439-404f-96f1-d5ced3154038-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8\" (UID: \"1965e849-9439-404f-96f1-d5ced3154038\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.347567 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1965e849-9439-404f-96f1-d5ced3154038-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8\" (UID: \"1965e849-9439-404f-96f1-d5ced3154038\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.354687 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zfm6\" (UniqueName: \"kubernetes.io/projected/1965e849-9439-404f-96f1-d5ced3154038-kube-api-access-4zfm6\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8\" (UID: \"1965e849-9439-404f-96f1-d5ced3154038\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:19 crc kubenswrapper[5016]: I1211 11:11:19.432458 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:20 crc kubenswrapper[5016]: I1211 11:11:20.023750 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"]
Dec 11 11:11:20 crc kubenswrapper[5016]: I1211 11:11:20.966859 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8" event={"ID":"1965e849-9439-404f-96f1-d5ced3154038","Type":"ContainerStarted","Data":"fd7e70ef917f6694d3e49219a8904dbe09b49945d58d400d8e7bde43847517ab"}
Dec 11 11:11:20 crc kubenswrapper[5016]: I1211 11:11:20.967199 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8" event={"ID":"1965e849-9439-404f-96f1-d5ced3154038","Type":"ContainerStarted","Data":"017e085cfdffd14d70858f0cf333fc2ab7b4ea9368ea179db5baa3acfc64bd8f"}
Dec 11 11:11:20 crc kubenswrapper[5016]: I1211 11:11:20.991009 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8" podStartSLOduration=1.4329763309999999 podStartE2EDuration="1.990965959s" podCreationTimestamp="2025-12-11 11:11:19 +0000 UTC" firstStartedPulling="2025-12-11 11:11:20.034058912 +0000 UTC m=+2196.852618501" lastFinishedPulling="2025-12-11 11:11:20.59204855 +0000 UTC m=+2197.410608129" observedRunningTime="2025-12-11 11:11:20.98857384 +0000 UTC m=+2197.807133439" watchObservedRunningTime="2025-12-11 11:11:20.990965959 +0000 UTC m=+2197.809525558"
Dec 11 11:11:31 crc kubenswrapper[5016]: I1211 11:11:31.092942 5016 generic.go:334] "Generic (PLEG): container finished" podID="1965e849-9439-404f-96f1-d5ced3154038" containerID="fd7e70ef917f6694d3e49219a8904dbe09b49945d58d400d8e7bde43847517ab" exitCode=0
Dec 11 11:11:31 crc kubenswrapper[5016]: I1211 11:11:31.093009 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8" event={"ID":"1965e849-9439-404f-96f1-d5ced3154038","Type":"ContainerDied","Data":"fd7e70ef917f6694d3e49219a8904dbe09b49945d58d400d8e7bde43847517ab"}
Dec 11 11:11:32 crc kubenswrapper[5016]: I1211 11:11:32.615964 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:32 crc kubenswrapper[5016]: I1211 11:11:32.739367 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1965e849-9439-404f-96f1-d5ced3154038-ssh-key\") pod \"1965e849-9439-404f-96f1-d5ced3154038\" (UID: \"1965e849-9439-404f-96f1-d5ced3154038\") "
Dec 11 11:11:32 crc kubenswrapper[5016]: I1211 11:11:32.740360 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1965e849-9439-404f-96f1-d5ced3154038-inventory\") pod \"1965e849-9439-404f-96f1-d5ced3154038\" (UID: \"1965e849-9439-404f-96f1-d5ced3154038\") "
Dec 11 11:11:32 crc kubenswrapper[5016]: I1211 11:11:32.740474 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zfm6\" (UniqueName: \"kubernetes.io/projected/1965e849-9439-404f-96f1-d5ced3154038-kube-api-access-4zfm6\") pod \"1965e849-9439-404f-96f1-d5ced3154038\" (UID: \"1965e849-9439-404f-96f1-d5ced3154038\") "
Dec 11 11:11:32 crc kubenswrapper[5016]: I1211 11:11:32.750638 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1965e849-9439-404f-96f1-d5ced3154038-kube-api-access-4zfm6" (OuterVolumeSpecName: "kube-api-access-4zfm6") pod "1965e849-9439-404f-96f1-d5ced3154038" (UID: "1965e849-9439-404f-96f1-d5ced3154038"). InnerVolumeSpecName "kube-api-access-4zfm6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:11:32 crc kubenswrapper[5016]: I1211 11:11:32.777602 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1965e849-9439-404f-96f1-d5ced3154038-inventory" (OuterVolumeSpecName: "inventory") pod "1965e849-9439-404f-96f1-d5ced3154038" (UID: "1965e849-9439-404f-96f1-d5ced3154038"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 11:11:32 crc kubenswrapper[5016]: I1211 11:11:32.789315 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1965e849-9439-404f-96f1-d5ced3154038-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1965e849-9439-404f-96f1-d5ced3154038" (UID: "1965e849-9439-404f-96f1-d5ced3154038"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 11:11:32 crc kubenswrapper[5016]: I1211 11:11:32.843715 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zfm6\" (UniqueName: \"kubernetes.io/projected/1965e849-9439-404f-96f1-d5ced3154038-kube-api-access-4zfm6\") on node \"crc\" DevicePath \"\""
Dec 11 11:11:32 crc kubenswrapper[5016]: I1211 11:11:32.843764 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1965e849-9439-404f-96f1-d5ced3154038-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 11 11:11:32 crc kubenswrapper[5016]: I1211 11:11:32.843778 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1965e849-9439-404f-96f1-d5ced3154038-inventory\") on node \"crc\" DevicePath \"\""
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.119086 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8" event={"ID":"1965e849-9439-404f-96f1-d5ced3154038","Type":"ContainerDied","Data":"017e085cfdffd14d70858f0cf333fc2ab7b4ea9368ea179db5baa3acfc64bd8f"}
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.119136 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="017e085cfdffd14d70858f0cf333fc2ab7b4ea9368ea179db5baa3acfc64bd8f"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.119206 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.223385 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"]
Dec 11 11:11:33 crc kubenswrapper[5016]: E1211 11:11:33.224128 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1965e849-9439-404f-96f1-d5ced3154038" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.224161 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="1965e849-9439-404f-96f1-d5ced3154038" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.224436 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="1965e849-9439-404f-96f1-d5ced3154038" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.225499 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.235026 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.235077 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.235178 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.235178 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.235252 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.235033 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"]
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.235526 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.236312 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.236443 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.354549 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.354615 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.354697 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.354841 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.354868 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.354895 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.354923 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.354984 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.355015 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.355059 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.355086 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.355126 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.355175 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.355236 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbbff\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-kube-api-access-sbbff\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.459771 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.460264 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.460374 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.460470 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"
Dec 11
11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.460581 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.461299 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.461438 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.461748 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.461867 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.462028 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.462192 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbbff\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-kube-api-access-sbbff\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.462374 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.462493 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.462591 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.466880 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.469162 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.470226 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.472649 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.473050 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc 
kubenswrapper[5016]: I1211 11:11:33.476209 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.479633 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.480653 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.488881 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.488902 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.489432 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.495565 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.510757 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-ovn-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.512306 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbbff\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-kube-api-access-sbbff\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:33 crc kubenswrapper[5016]: I1211 11:11:33.551669 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:11:34 crc kubenswrapper[5016]: I1211 11:11:34.275615 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg"] Dec 11 11:11:35 crc kubenswrapper[5016]: I1211 11:11:35.139370 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" event={"ID":"a31cb907-f20d-44a6-abc0-53951fe5e793","Type":"ContainerStarted","Data":"3aa513caa15b78e4e83de0accbb07aae660ce7fe57d554ef5e9029742844515f"} Dec 11 11:11:36 crc kubenswrapper[5016]: I1211 11:11:36.163814 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" event={"ID":"a31cb907-f20d-44a6-abc0-53951fe5e793","Type":"ContainerStarted","Data":"0c660a8f947e02ec2128e8a7831848b6fc3474646e797d038f3a0fed1130a87c"} Dec 11 11:11:36 crc kubenswrapper[5016]: I1211 11:11:36.202850 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" podStartSLOduration=2.6026367070000003 podStartE2EDuration="3.202817878s" podCreationTimestamp="2025-12-11 11:11:33 +0000 UTC" firstStartedPulling="2025-12-11 11:11:34.279881004 +0000 UTC m=+2211.098440583" lastFinishedPulling="2025-12-11 11:11:34.880062185 +0000 UTC m=+2211.698621754" observedRunningTime="2025-12-11 11:11:36.196103453 +0000 UTC m=+2213.014663042" watchObservedRunningTime="2025-12-11 11:11:36.202817878 +0000 UTC m=+2213.021377477" Dec 11 11:11:42 crc kubenswrapper[5016]: I1211 11:11:42.933121 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:11:42 crc kubenswrapper[5016]: I1211 11:11:42.933735 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:11:42 crc kubenswrapper[5016]: I1211 11:11:42.933801 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 11:11:42 crc kubenswrapper[5016]: I1211 11:11:42.934804 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"745e2ce5229d52e8fc8364c6aec4dc08214336fac2ba63dc453c716f42c02e8d"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 11:11:42 crc kubenswrapper[5016]: I1211 11:11:42.934878 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://745e2ce5229d52e8fc8364c6aec4dc08214336fac2ba63dc453c716f42c02e8d" gracePeriod=600 Dec 11 11:11:43 crc kubenswrapper[5016]: I1211 11:11:43.245715 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="745e2ce5229d52e8fc8364c6aec4dc08214336fac2ba63dc453c716f42c02e8d" exitCode=0 Dec 11 11:11:43 crc kubenswrapper[5016]: I1211 11:11:43.245815 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"745e2ce5229d52e8fc8364c6aec4dc08214336fac2ba63dc453c716f42c02e8d"} Dec 11 11:11:43 crc kubenswrapper[5016]: I1211 11:11:43.246161 5016 scope.go:117] "RemoveContainer" containerID="1c2c703cc369f81e6ccc0d879223c3491de72957162f630c29147469a8b9d756" Dec 11 11:11:44 crc kubenswrapper[5016]: I1211 11:11:44.259482 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d"} Dec 11 11:12:07 crc kubenswrapper[5016]: I1211 11:12:07.945200 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qgqq9"] Dec 11 11:12:07 crc kubenswrapper[5016]: I1211 11:12:07.948505 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:07 crc kubenswrapper[5016]: I1211 11:12:07.958363 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qgqq9"] Dec 11 11:12:08 crc kubenswrapper[5016]: I1211 11:12:08.076913 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db26e73a-9c91-47ec-9f36-9577c7e07e87-catalog-content\") pod \"certified-operators-qgqq9\" (UID: \"db26e73a-9c91-47ec-9f36-9577c7e07e87\") " pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:08 crc kubenswrapper[5016]: I1211 11:12:08.077097 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db26e73a-9c91-47ec-9f36-9577c7e07e87-utilities\") pod \"certified-operators-qgqq9\" (UID: \"db26e73a-9c91-47ec-9f36-9577c7e07e87\") " pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:08 crc kubenswrapper[5016]: I1211 11:12:08.077141 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88zb8\" (UniqueName: \"kubernetes.io/projected/db26e73a-9c91-47ec-9f36-9577c7e07e87-kube-api-access-88zb8\") pod \"certified-operators-qgqq9\" (UID: \"db26e73a-9c91-47ec-9f36-9577c7e07e87\") " pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:08 crc kubenswrapper[5016]: I1211 11:12:08.179477 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db26e73a-9c91-47ec-9f36-9577c7e07e87-catalog-content\") pod \"certified-operators-qgqq9\" (UID: \"db26e73a-9c91-47ec-9f36-9577c7e07e87\") " pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:08 crc kubenswrapper[5016]: I1211 11:12:08.179904 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db26e73a-9c91-47ec-9f36-9577c7e07e87-utilities\") pod \"certified-operators-qgqq9\" (UID: \"db26e73a-9c91-47ec-9f36-9577c7e07e87\") " pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:08 crc kubenswrapper[5016]: I1211 11:12:08.179983 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88zb8\" (UniqueName: \"kubernetes.io/projected/db26e73a-9c91-47ec-9f36-9577c7e07e87-kube-api-access-88zb8\") pod \"certified-operators-qgqq9\" (UID: \"db26e73a-9c91-47ec-9f36-9577c7e07e87\") " pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:08 crc kubenswrapper[5016]: I1211 11:12:08.181145 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db26e73a-9c91-47ec-9f36-9577c7e07e87-catalog-content\") pod \"certified-operators-qgqq9\" (UID: \"db26e73a-9c91-47ec-9f36-9577c7e07e87\") " pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:08 crc kubenswrapper[5016]: I1211 11:12:08.181689 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db26e73a-9c91-47ec-9f36-9577c7e07e87-utilities\") pod \"certified-operators-qgqq9\" (UID: \"db26e73a-9c91-47ec-9f36-9577c7e07e87\") " pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:08 crc kubenswrapper[5016]: I1211 11:12:08.220495 5016 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-88zb8\" (UniqueName: \"kubernetes.io/projected/db26e73a-9c91-47ec-9f36-9577c7e07e87-kube-api-access-88zb8\") pod \"certified-operators-qgqq9\" (UID: \"db26e73a-9c91-47ec-9f36-9577c7e07e87\") " pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:08 crc kubenswrapper[5016]: I1211 11:12:08.278968 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:08 crc kubenswrapper[5016]: I1211 11:12:08.855772 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qgqq9"] Dec 11 11:12:09 crc kubenswrapper[5016]: I1211 11:12:09.546240 5016 generic.go:334] "Generic (PLEG): container finished" podID="db26e73a-9c91-47ec-9f36-9577c7e07e87" containerID="0d856921d50eeaf1a281d0c638852cb75e106dc5456a8ed17467012eb47dc16f" exitCode=0 Dec 11 11:12:09 crc kubenswrapper[5016]: I1211 11:12:09.546324 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgqq9" event={"ID":"db26e73a-9c91-47ec-9f36-9577c7e07e87","Type":"ContainerDied","Data":"0d856921d50eeaf1a281d0c638852cb75e106dc5456a8ed17467012eb47dc16f"} Dec 11 11:12:09 crc kubenswrapper[5016]: I1211 11:12:09.546677 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgqq9" event={"ID":"db26e73a-9c91-47ec-9f36-9577c7e07e87","Type":"ContainerStarted","Data":"690f7d54dd1b312124a93178a326a0ee32c46beffef549b71a4a06cd0ac40e22"} Dec 11 11:12:13 crc kubenswrapper[5016]: I1211 11:12:13.594173 5016 generic.go:334] "Generic (PLEG): container finished" podID="a31cb907-f20d-44a6-abc0-53951fe5e793" containerID="0c660a8f947e02ec2128e8a7831848b6fc3474646e797d038f3a0fed1130a87c" exitCode=0 Dec 11 11:12:13 crc kubenswrapper[5016]: I1211 11:12:13.594276 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" event={"ID":"a31cb907-f20d-44a6-abc0-53951fe5e793","Type":"ContainerDied","Data":"0c660a8f947e02ec2128e8a7831848b6fc3474646e797d038f3a0fed1130a87c"} Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.116481 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.279464 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-bootstrap-combined-ca-bundle\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.279735 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-ovn-combined-ca-bundle\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.279880 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbbff\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-kube-api-access-sbbff\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.280084 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.280223 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.280337 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-telemetry-combined-ca-bundle\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.280462 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-inventory\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.280559 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-repo-setup-combined-ca-bundle\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.280690 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-ovn-default-certs-0\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: 
\"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.281357 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.281495 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-ssh-key\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.281682 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-neutron-metadata-combined-ca-bundle\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.281776 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-nova-combined-ca-bundle\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.281854 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-libvirt-combined-ca-bundle\") pod \"a31cb907-f20d-44a6-abc0-53951fe5e793\" (UID: \"a31cb907-f20d-44a6-abc0-53951fe5e793\") " Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.287621 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.287736 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-kube-api-access-sbbff" (OuterVolumeSpecName: "kube-api-access-sbbff") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "kube-api-access-sbbff". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.288426 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.288560 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.295280 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.295314 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.295431 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.324374 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.324848 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.324930 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.325436 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.325488 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.326952 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-inventory" (OuterVolumeSpecName: "inventory") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.332206 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a31cb907-f20d-44a6-abc0-53951fe5e793" (UID: "a31cb907-f20d-44a6-abc0-53951fe5e793"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.383920 5016 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.383982 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbbff\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-kube-api-access-sbbff\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.383993 5016 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.384006 5016 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.384019 5016 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.384029 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.384037 5016 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.384046 5016 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.384058 5016 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a31cb907-f20d-44a6-abc0-53951fe5e793-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.384067 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.384080 5016 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.384090 5016 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.384101 5016 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.384111 5016 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31cb907-f20d-44a6-abc0-53951fe5e793-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.616209 5016 generic.go:334] "Generic (PLEG): container finished" podID="db26e73a-9c91-47ec-9f36-9577c7e07e87" containerID="938adc5c0af855b7ba43c5137daea5c88bafd1d05623efd0347c206cc55a9a8b" exitCode=0 Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.616329 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgqq9" event={"ID":"db26e73a-9c91-47ec-9f36-9577c7e07e87","Type":"ContainerDied","Data":"938adc5c0af855b7ba43c5137daea5c88bafd1d05623efd0347c206cc55a9a8b"} Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.618253 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" event={"ID":"a31cb907-f20d-44a6-abc0-53951fe5e793","Type":"ContainerDied","Data":"3aa513caa15b78e4e83de0accbb07aae660ce7fe57d554ef5e9029742844515f"} Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.618308 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3aa513caa15b78e4e83de0accbb07aae660ce7fe57d554ef5e9029742844515f" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.618318 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.729975 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz"] Dec 11 11:12:15 crc kubenswrapper[5016]: E1211 11:12:15.730755 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a31cb907-f20d-44a6-abc0-53951fe5e793" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.730774 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="a31cb907-f20d-44a6-abc0-53951fe5e793" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.730974 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="a31cb907-f20d-44a6-abc0-53951fe5e793" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.731786 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.736644 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.738449 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.738591 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.738591 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.739429 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.741834 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz"] Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.894056 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.894158 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/68a2fe3a-3815-4605-b685-2ffe583f46d4-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.894222 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.894267 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.894289 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gswn\" (UniqueName: \"kubernetes.io/projected/68a2fe3a-3815-4605-b685-2ffe583f46d4-kube-api-access-8gswn\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.995990 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.996046 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gswn\" (UniqueName: \"kubernetes.io/projected/68a2fe3a-3815-4605-b685-2ffe583f46d4-kube-api-access-8gswn\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.996160 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.996254 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/68a2fe3a-3815-4605-b685-2ffe583f46d4-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.996332 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:15 crc kubenswrapper[5016]: I1211 11:12:15.997650 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/68a2fe3a-3815-4605-b685-2ffe583f46d4-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:16 crc kubenswrapper[5016]: I1211 11:12:16.005710 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:16 crc kubenswrapper[5016]: I1211 11:12:16.006273 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:16 crc kubenswrapper[5016]: I1211 11:12:16.006492 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:16 crc kubenswrapper[5016]: I1211 11:12:16.020895 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gswn\" (UniqueName: \"kubernetes.io/projected/68a2fe3a-3815-4605-b685-2ffe583f46d4-kube-api-access-8gswn\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-b97tz\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:16 crc kubenswrapper[5016]: I1211 11:12:16.053183 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:12:16 crc kubenswrapper[5016]: I1211 11:12:16.703897 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz"] Dec 11 11:12:16 crc kubenswrapper[5016]: W1211 11:12:16.706296 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod68a2fe3a_3815_4605_b685_2ffe583f46d4.slice/crio-58cdd040d641d007e08b4b3cf293b95892ef5fae4d5b0fdcfd0bef963a32ecce WatchSource:0}: Error finding container 58cdd040d641d007e08b4b3cf293b95892ef5fae4d5b0fdcfd0bef963a32ecce: Status 404 returned error can't find the container with id 58cdd040d641d007e08b4b3cf293b95892ef5fae4d5b0fdcfd0bef963a32ecce Dec 11 11:12:17 crc kubenswrapper[5016]: I1211 11:12:17.647016 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" event={"ID":"68a2fe3a-3815-4605-b685-2ffe583f46d4","Type":"ContainerStarted","Data":"58cdd040d641d007e08b4b3cf293b95892ef5fae4d5b0fdcfd0bef963a32ecce"} Dec 11 11:12:17 crc kubenswrapper[5016]: I1211 11:12:17.651384 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgqq9" event={"ID":"db26e73a-9c91-47ec-9f36-9577c7e07e87","Type":"ContainerStarted","Data":"a655e126a60f82cfded89c7bfac2d47fd45483b6ed2cb369dd344c43fa0cd473"} Dec 11 11:12:17 crc kubenswrapper[5016]: I1211 11:12:17.682517 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qgqq9" podStartSLOduration=3.2517785310000002 podStartE2EDuration="10.682486247s" podCreationTimestamp="2025-12-11 11:12:07 +0000 UTC" firstStartedPulling="2025-12-11 11:12:09.549050387 +0000 UTC m=+2246.367609966" lastFinishedPulling="2025-12-11 11:12:16.979758103 +0000 UTC m=+2253.798317682" observedRunningTime="2025-12-11 11:12:17.672354319 +0000 UTC m=+2254.490913908" watchObservedRunningTime="2025-12-11 11:12:17.682486247 +0000 UTC m=+2254.501045826" Dec 11 11:12:18 crc kubenswrapper[5016]: I1211 11:12:18.279266 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:18 crc kubenswrapper[5016]: I1211 11:12:18.279347 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:18 crc kubenswrapper[5016]: I1211 11:12:18.664773 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" event={"ID":"68a2fe3a-3815-4605-b685-2ffe583f46d4","Type":"ContainerStarted","Data":"01c6ee0bdec43094f6472fb1eebc3778cf846322b5b12bad6f98c3151eb3d971"} Dec 11 11:12:18 crc kubenswrapper[5016]: I1211 11:12:18.704245 5016 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" podStartSLOduration=3.04408291 podStartE2EDuration="3.704220492s" podCreationTimestamp="2025-12-11 11:12:15 +0000 UTC" firstStartedPulling="2025-12-11 11:12:16.709446496 +0000 UTC m=+2253.528006075" lastFinishedPulling="2025-12-11 11:12:17.369584078 +0000 UTC m=+2254.188143657" observedRunningTime="2025-12-11 11:12:18.693237503 +0000 UTC m=+2255.511797082" watchObservedRunningTime="2025-12-11 11:12:18.704220492 +0000 UTC m=+2255.522780071" Dec 11 11:12:19 crc kubenswrapper[5016]: I1211 11:12:19.337087 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-qgqq9" podUID="db26e73a-9c91-47ec-9f36-9577c7e07e87" containerName="registry-server" probeResult="failure" output=< Dec 11 11:12:19 crc kubenswrapper[5016]: timeout: failed to connect service ":50051" within 1s Dec 11 11:12:19 crc kubenswrapper[5016]: > Dec 11 11:12:28 crc kubenswrapper[5016]: I1211 11:12:28.331431 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:28 crc kubenswrapper[5016]: I1211 11:12:28.404279 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:12:28 crc kubenswrapper[5016]: I1211 11:12:28.495049 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qgqq9"] Dec 11 11:12:28 crc kubenswrapper[5016]: I1211 11:12:28.578923 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vrh9v"] Dec 11 11:12:28 crc kubenswrapper[5016]: I1211 11:12:28.579297 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vrh9v" podUID="d896058c-2d6d-47e8-b1fc-d0b68de8098e" containerName="registry-server" containerID="cri-o://b8e2eb53287f772715861e9a20cb26d32a42e9030a8076d320ccbd808a1d6f15" gracePeriod=2 Dec 11 11:12:28 crc kubenswrapper[5016]: I1211 11:12:28.790774 5016 generic.go:334] "Generic (PLEG): container finished" podID="d896058c-2d6d-47e8-b1fc-d0b68de8098e" containerID="b8e2eb53287f772715861e9a20cb26d32a42e9030a8076d320ccbd808a1d6f15" exitCode=0 Dec 11 11:12:28 crc kubenswrapper[5016]: I1211 11:12:28.790850 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrh9v" event={"ID":"d896058c-2d6d-47e8-b1fc-d0b68de8098e","Type":"ContainerDied","Data":"b8e2eb53287f772715861e9a20cb26d32a42e9030a8076d320ccbd808a1d6f15"} Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.114295 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.214396 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d896058c-2d6d-47e8-b1fc-d0b68de8098e-utilities\") pod \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\" (UID: \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\") " Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.214510 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d896058c-2d6d-47e8-b1fc-d0b68de8098e-catalog-content\") pod \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\" (UID: \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\") " Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.214653 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phgds\" (UniqueName: \"kubernetes.io/projected/d896058c-2d6d-47e8-b1fc-d0b68de8098e-kube-api-access-phgds\") pod \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\" (UID: \"d896058c-2d6d-47e8-b1fc-d0b68de8098e\") " Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.215513 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d896058c-2d6d-47e8-b1fc-d0b68de8098e-utilities" (OuterVolumeSpecName: "utilities") pod "d896058c-2d6d-47e8-b1fc-d0b68de8098e" (UID: "d896058c-2d6d-47e8-b1fc-d0b68de8098e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.238876 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d896058c-2d6d-47e8-b1fc-d0b68de8098e-kube-api-access-phgds" (OuterVolumeSpecName: "kube-api-access-phgds") pod "d896058c-2d6d-47e8-b1fc-d0b68de8098e" (UID: "d896058c-2d6d-47e8-b1fc-d0b68de8098e"). InnerVolumeSpecName "kube-api-access-phgds". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.287074 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d896058c-2d6d-47e8-b1fc-d0b68de8098e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d896058c-2d6d-47e8-b1fc-d0b68de8098e" (UID: "d896058c-2d6d-47e8-b1fc-d0b68de8098e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.322045 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d896058c-2d6d-47e8-b1fc-d0b68de8098e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.322088 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phgds\" (UniqueName: \"kubernetes.io/projected/d896058c-2d6d-47e8-b1fc-d0b68de8098e-kube-api-access-phgds\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.322099 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d896058c-2d6d-47e8-b1fc-d0b68de8098e-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.822210 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vrh9v" Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.822832 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrh9v" event={"ID":"d896058c-2d6d-47e8-b1fc-d0b68de8098e","Type":"ContainerDied","Data":"520eba003862dce66fda661a88742f1fe89fc127826dcf0ab984f6d906e19ece"} Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.822877 5016 scope.go:117] "RemoveContainer" containerID="b8e2eb53287f772715861e9a20cb26d32a42e9030a8076d320ccbd808a1d6f15" Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.858183 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vrh9v"] Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.866380 5016 scope.go:117] "RemoveContainer" containerID="baec5952fe8989c48fd508c96b928666a88e74d6a7319f67242050bdd0a1e12c" Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.868749 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vrh9v"] Dec 11 11:12:29 crc kubenswrapper[5016]: I1211 11:12:29.893463 5016 scope.go:117] "RemoveContainer" containerID="345cb63392099b0df98d1469e7ca0cab0bbb7640d884ae7ddfe84accf3cab7e6" Dec 11 11:12:31 crc kubenswrapper[5016]: I1211 11:12:31.535870 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d896058c-2d6d-47e8-b1fc-d0b68de8098e" path="/var/lib/kubelet/pods/d896058c-2d6d-47e8-b1fc-d0b68de8098e/volumes" Dec 11 11:13:22 crc kubenswrapper[5016]: I1211 11:13:22.401533 5016 generic.go:334] "Generic (PLEG): container finished" podID="68a2fe3a-3815-4605-b685-2ffe583f46d4" containerID="01c6ee0bdec43094f6472fb1eebc3778cf846322b5b12bad6f98c3151eb3d971" exitCode=0 Dec 11 11:13:22 crc kubenswrapper[5016]: I1211 11:13:22.401619 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" event={"ID":"68a2fe3a-3815-4605-b685-2ffe583f46d4","Type":"ContainerDied","Data":"01c6ee0bdec43094f6472fb1eebc3778cf846322b5b12bad6f98c3151eb3d971"} Dec 11 11:13:23 crc kubenswrapper[5016]: I1211 11:13:23.896698 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.011062 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/68a2fe3a-3815-4605-b685-2ffe583f46d4-ovncontroller-config-0\") pod \"68a2fe3a-3815-4605-b685-2ffe583f46d4\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.011856 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-ovn-combined-ca-bundle\") pod \"68a2fe3a-3815-4605-b685-2ffe583f46d4\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.012068 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gswn\" (UniqueName: \"kubernetes.io/projected/68a2fe3a-3815-4605-b685-2ffe583f46d4-kube-api-access-8gswn\") pod \"68a2fe3a-3815-4605-b685-2ffe583f46d4\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.012153 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-inventory\") pod \"68a2fe3a-3815-4605-b685-2ffe583f46d4\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.012226 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-ssh-key\") pod \"68a2fe3a-3815-4605-b685-2ffe583f46d4\" (UID: \"68a2fe3a-3815-4605-b685-2ffe583f46d4\") " Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.021282 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "68a2fe3a-3815-4605-b685-2ffe583f46d4" (UID: "68a2fe3a-3815-4605-b685-2ffe583f46d4"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.021376 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68a2fe3a-3815-4605-b685-2ffe583f46d4-kube-api-access-8gswn" (OuterVolumeSpecName: "kube-api-access-8gswn") pod "68a2fe3a-3815-4605-b685-2ffe583f46d4" (UID: "68a2fe3a-3815-4605-b685-2ffe583f46d4"). InnerVolumeSpecName "kube-api-access-8gswn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.044299 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "68a2fe3a-3815-4605-b685-2ffe583f46d4" (UID: "68a2fe3a-3815-4605-b685-2ffe583f46d4"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.063621 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/68a2fe3a-3815-4605-b685-2ffe583f46d4-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "68a2fe3a-3815-4605-b685-2ffe583f46d4" (UID: "68a2fe3a-3815-4605-b685-2ffe583f46d4"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.070232 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-inventory" (OuterVolumeSpecName: "inventory") pod "68a2fe3a-3815-4605-b685-2ffe583f46d4" (UID: "68a2fe3a-3815-4605-b685-2ffe583f46d4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.115069 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.115116 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.115129 5016 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/68a2fe3a-3815-4605-b685-2ffe583f46d4-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.115145 5016 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a2fe3a-3815-4605-b685-2ffe583f46d4-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.115154 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gswn\" (UniqueName: \"kubernetes.io/projected/68a2fe3a-3815-4605-b685-2ffe583f46d4-kube-api-access-8gswn\") on node \"crc\" DevicePath \"\"" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.437341 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" event={"ID":"68a2fe3a-3815-4605-b685-2ffe583f46d4","Type":"ContainerDied","Data":"58cdd040d641d007e08b4b3cf293b95892ef5fae4d5b0fdcfd0bef963a32ecce"} Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.437387 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="58cdd040d641d007e08b4b3cf293b95892ef5fae4d5b0fdcfd0bef963a32ecce" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.437454 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-b97tz" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.526051 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2"] Dec 11 11:13:24 crc kubenswrapper[5016]: E1211 11:13:24.528910 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d896058c-2d6d-47e8-b1fc-d0b68de8098e" containerName="extract-utilities" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.528950 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="d896058c-2d6d-47e8-b1fc-d0b68de8098e" containerName="extract-utilities" Dec 11 11:13:24 crc kubenswrapper[5016]: E1211 11:13:24.528974 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d896058c-2d6d-47e8-b1fc-d0b68de8098e" containerName="extract-content" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.528981 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="d896058c-2d6d-47e8-b1fc-d0b68de8098e" containerName="extract-content" Dec 11 11:13:24 crc kubenswrapper[5016]: E1211 11:13:24.529005 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d896058c-2d6d-47e8-b1fc-d0b68de8098e" containerName="registry-server" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.529015 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="d896058c-2d6d-47e8-b1fc-d0b68de8098e" containerName="registry-server" Dec 11 11:13:24 crc kubenswrapper[5016]: E1211 11:13:24.529040 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68a2fe3a-3815-4605-b685-2ffe583f46d4" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.529046 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="68a2fe3a-3815-4605-b685-2ffe583f46d4" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.529802 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="d896058c-2d6d-47e8-b1fc-d0b68de8098e" containerName="registry-server" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.529835 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="68a2fe3a-3815-4605-b685-2ffe583f46d4" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.531137 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.536191 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.536357 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.536716 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.536904 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.537100 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.537220 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.545461 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2"] Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.631537 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.631631 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.632103 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.632166 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-745x9\" (UniqueName: \"kubernetes.io/projected/24b4bd76-ba99-43ad-91e9-4fdf518a6935-kube-api-access-745x9\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.632296 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.632379 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.734348 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.734402 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.734499 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.734519 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-745x9\" (UniqueName: \"kubernetes.io/projected/24b4bd76-ba99-43ad-91e9-4fdf518a6935-kube-api-access-745x9\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.734564 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.734603 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.739373 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.739537 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.740171 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.740383 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.740913 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.754830 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-745x9\" (UniqueName: \"kubernetes.io/projected/24b4bd76-ba99-43ad-91e9-4fdf518a6935-kube-api-access-745x9\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:24 crc kubenswrapper[5016]: I1211 11:13:24.849054 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:13:25 crc kubenswrapper[5016]: I1211 11:13:25.424709 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2"] Dec 11 11:13:25 crc kubenswrapper[5016]: W1211 11:13:25.426463 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod24b4bd76_ba99_43ad_91e9_4fdf518a6935.slice/crio-4710a2a6a6f53347211fc44e28bbbd2a61f1ac57e82b7f9d4dc3a4395270f8c5 WatchSource:0}: Error finding container 4710a2a6a6f53347211fc44e28bbbd2a61f1ac57e82b7f9d4dc3a4395270f8c5: Status 404 returned error can't find the container with id 4710a2a6a6f53347211fc44e28bbbd2a61f1ac57e82b7f9d4dc3a4395270f8c5 Dec 11 11:13:25 crc kubenswrapper[5016]: I1211 11:13:25.447631 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" event={"ID":"24b4bd76-ba99-43ad-91e9-4fdf518a6935","Type":"ContainerStarted","Data":"4710a2a6a6f53347211fc44e28bbbd2a61f1ac57e82b7f9d4dc3a4395270f8c5"} Dec 11 11:13:26 crc kubenswrapper[5016]: I1211 11:13:26.459002 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" event={"ID":"24b4bd76-ba99-43ad-91e9-4fdf518a6935","Type":"ContainerStarted","Data":"e0361ecffc589a88d167000452fd3aeff14ab84fe2b662546ca31877beb626b9"} Dec 11 11:14:11 crc kubenswrapper[5016]: I1211 11:14:11.959633 5016 generic.go:334] "Generic (PLEG): container finished" podID="24b4bd76-ba99-43ad-91e9-4fdf518a6935" containerID="e0361ecffc589a88d167000452fd3aeff14ab84fe2b662546ca31877beb626b9" exitCode=0 Dec 11 11:14:11 crc kubenswrapper[5016]: I1211 11:14:11.959715 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" event={"ID":"24b4bd76-ba99-43ad-91e9-4fdf518a6935","Type":"ContainerDied","Data":"e0361ecffc589a88d167000452fd3aeff14ab84fe2b662546ca31877beb626b9"} Dec 11 11:14:12 crc kubenswrapper[5016]: I1211 11:14:12.932985 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:14:12 crc kubenswrapper[5016]: I1211 11:14:12.933065 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.424924 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.473789 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-neutron-ovn-metadata-agent-neutron-config-0\") pod \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.473874 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-nova-metadata-neutron-config-0\") pod \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.476098 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-inventory\") pod \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.476185 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-745x9\" (UniqueName: \"kubernetes.io/projected/24b4bd76-ba99-43ad-91e9-4fdf518a6935-kube-api-access-745x9\") pod \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.476334 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-ssh-key\") pod \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.476408 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-neutron-metadata-combined-ca-bundle\") pod \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\" (UID: \"24b4bd76-ba99-43ad-91e9-4fdf518a6935\") " Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.488153 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "24b4bd76-ba99-43ad-91e9-4fdf518a6935" (UID: "24b4bd76-ba99-43ad-91e9-4fdf518a6935"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.489309 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24b4bd76-ba99-43ad-91e9-4fdf518a6935-kube-api-access-745x9" (OuterVolumeSpecName: "kube-api-access-745x9") pod "24b4bd76-ba99-43ad-91e9-4fdf518a6935" (UID: "24b4bd76-ba99-43ad-91e9-4fdf518a6935"). InnerVolumeSpecName "kube-api-access-745x9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.515848 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-inventory" (OuterVolumeSpecName: "inventory") pod "24b4bd76-ba99-43ad-91e9-4fdf518a6935" (UID: "24b4bd76-ba99-43ad-91e9-4fdf518a6935"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.525607 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "24b4bd76-ba99-43ad-91e9-4fdf518a6935" (UID: "24b4bd76-ba99-43ad-91e9-4fdf518a6935"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.547060 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "24b4bd76-ba99-43ad-91e9-4fdf518a6935" (UID: "24b4bd76-ba99-43ad-91e9-4fdf518a6935"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.549819 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "24b4bd76-ba99-43ad-91e9-4fdf518a6935" (UID: "24b4bd76-ba99-43ad-91e9-4fdf518a6935"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.579110 5016 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.579146 5016 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.579157 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.579174 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-745x9\" (UniqueName: \"kubernetes.io/projected/24b4bd76-ba99-43ad-91e9-4fdf518a6935-kube-api-access-745x9\") on node \"crc\" DevicePath \"\"" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.579184 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.579194 5016 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24b4bd76-ba99-43ad-91e9-4fdf518a6935-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.985032 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" event={"ID":"24b4bd76-ba99-43ad-91e9-4fdf518a6935","Type":"ContainerDied","Data":"4710a2a6a6f53347211fc44e28bbbd2a61f1ac57e82b7f9d4dc3a4395270f8c5"} Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.985500 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4710a2a6a6f53347211fc44e28bbbd2a61f1ac57e82b7f9d4dc3a4395270f8c5" Dec 11 11:14:13 crc kubenswrapper[5016]: I1211 11:14:13.985187 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.117011 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd"] Dec 11 11:14:14 crc kubenswrapper[5016]: E1211 11:14:14.117485 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24b4bd76-ba99-43ad-91e9-4fdf518a6935" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.117509 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="24b4bd76-ba99-43ad-91e9-4fdf518a6935" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.117687 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="24b4bd76-ba99-43ad-91e9-4fdf518a6935" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.118403 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.123688 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.123930 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.124627 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.124771 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.124997 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.139682 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd"] Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.194239 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.194430 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.194495 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.194581 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzwjj\" (UniqueName: \"kubernetes.io/projected/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-kube-api-access-rzwjj\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.194657 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.297015 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.297156 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzwjj\" (UniqueName: \"kubernetes.io/projected/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-kube-api-access-rzwjj\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.297202 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.297295 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.297442 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.303229 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.303443 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.311402 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.312265 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-ssh-key\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.315576 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzwjj\" (UniqueName: \"kubernetes.io/projected/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-kube-api-access-rzwjj\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:14 crc kubenswrapper[5016]: I1211 11:14:14.441318 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:14:15 crc kubenswrapper[5016]: I1211 11:14:15.016080 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd"] Dec 11 11:14:16 crc kubenswrapper[5016]: I1211 11:14:16.007918 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" event={"ID":"ff52a65c-c0b6-4d71-8038-b8c079cd1d64","Type":"ContainerStarted","Data":"20b66f7f30e168d1c3da72148f46a8d8168a72170742f3ac64ebb40d96c4b24b"} Dec 11 11:14:16 crc kubenswrapper[5016]: I1211 11:14:16.008328 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" event={"ID":"ff52a65c-c0b6-4d71-8038-b8c079cd1d64","Type":"ContainerStarted","Data":"86a2db051237f9f584cdaaa5e547b2b989ac58ff0023074907961f081879b135"} Dec 11 11:14:16 crc kubenswrapper[5016]: I1211 11:14:16.038406 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" podStartSLOduration=1.543678033 podStartE2EDuration="2.038375468s" podCreationTimestamp="2025-12-11 11:14:14 +0000 UTC" firstStartedPulling="2025-12-11 11:14:15.031451477 +0000 UTC m=+2371.850011056" lastFinishedPulling="2025-12-11 11:14:15.526148912 +0000 UTC m=+2372.344708491" observedRunningTime="2025-12-11 11:14:16.030874225 +0000 UTC m=+2372.849433814" watchObservedRunningTime="2025-12-11 11:14:16.038375468 +0000 UTC m=+2372.856935067" Dec 11 11:14:40 crc kubenswrapper[5016]: I1211 11:14:40.684390 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-s8gjw"] Dec 11 11:14:40 crc kubenswrapper[5016]: I1211 11:14:40.687834 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:40 crc kubenswrapper[5016]: I1211 11:14:40.699964 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s8gjw"] Dec 11 11:14:40 crc kubenswrapper[5016]: I1211 11:14:40.795610 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-utilities\") pod \"community-operators-s8gjw\" (UID: \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\") " pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:40 crc kubenswrapper[5016]: I1211 11:14:40.795925 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-catalog-content\") pod \"community-operators-s8gjw\" (UID: \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\") " pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:40 crc kubenswrapper[5016]: I1211 11:14:40.796202 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnj8s\" (UniqueName: \"kubernetes.io/projected/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-kube-api-access-hnj8s\") pod \"community-operators-s8gjw\" (UID: \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\") " pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:40 crc kubenswrapper[5016]: I1211 11:14:40.898147 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnj8s\" (UniqueName: \"kubernetes.io/projected/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-kube-api-access-hnj8s\") pod \"community-operators-s8gjw\" (UID: \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\") " pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:40 crc kubenswrapper[5016]: I1211 11:14:40.898223 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-utilities\") pod \"community-operators-s8gjw\" (UID: \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\") " pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:40 crc kubenswrapper[5016]: I1211 11:14:40.898254 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-catalog-content\") pod \"community-operators-s8gjw\" (UID: \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\") " pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:40 crc kubenswrapper[5016]: I1211 11:14:40.898739 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-catalog-content\") pod \"community-operators-s8gjw\" (UID: \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\") " pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:40 crc kubenswrapper[5016]: I1211 11:14:40.898960 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-utilities\") pod \"community-operators-s8gjw\" (UID: \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\") " pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:40 crc kubenswrapper[5016]: I1211 11:14:40.920608 5016 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-hnj8s\" (UniqueName: \"kubernetes.io/projected/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-kube-api-access-hnj8s\") pod \"community-operators-s8gjw\" (UID: \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\") " pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:41 crc kubenswrapper[5016]: I1211 11:14:41.022539 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:41 crc kubenswrapper[5016]: I1211 11:14:41.612034 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s8gjw"] Dec 11 11:14:42 crc kubenswrapper[5016]: I1211 11:14:42.289260 5016 generic.go:334] "Generic (PLEG): container finished" podID="7fee40af-4c5d-4f07-850c-54fdad7cf8ad" containerID="823dc2322914f7930357cfcbf5191812510a1c04d253e33856f1cd74af39508e" exitCode=0 Dec 11 11:14:42 crc kubenswrapper[5016]: I1211 11:14:42.289313 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8gjw" event={"ID":"7fee40af-4c5d-4f07-850c-54fdad7cf8ad","Type":"ContainerDied","Data":"823dc2322914f7930357cfcbf5191812510a1c04d253e33856f1cd74af39508e"} Dec 11 11:14:42 crc kubenswrapper[5016]: I1211 11:14:42.289542 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8gjw" event={"ID":"7fee40af-4c5d-4f07-850c-54fdad7cf8ad","Type":"ContainerStarted","Data":"8ce2c1e18a2173e1ba0dc4ffd44704c7c38b1e4dcd26a11ef772243cdb921f95"} Dec 11 11:14:42 crc kubenswrapper[5016]: I1211 11:14:42.933163 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:14:42 crc kubenswrapper[5016]: I1211 11:14:42.933267 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:14:43 crc kubenswrapper[5016]: I1211 11:14:43.301158 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8gjw" event={"ID":"7fee40af-4c5d-4f07-850c-54fdad7cf8ad","Type":"ContainerStarted","Data":"8280c25a7a11f51a6a4b3012251c9d75e5c64b44c9e64f49a32351128856ebdc"} Dec 11 11:14:44 crc kubenswrapper[5016]: I1211 11:14:44.312706 5016 generic.go:334] "Generic (PLEG): container finished" podID="7fee40af-4c5d-4f07-850c-54fdad7cf8ad" containerID="8280c25a7a11f51a6a4b3012251c9d75e5c64b44c9e64f49a32351128856ebdc" exitCode=0 Dec 11 11:14:44 crc kubenswrapper[5016]: I1211 11:14:44.312811 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8gjw" event={"ID":"7fee40af-4c5d-4f07-850c-54fdad7cf8ad","Type":"ContainerDied","Data":"8280c25a7a11f51a6a4b3012251c9d75e5c64b44c9e64f49a32351128856ebdc"} Dec 11 11:14:46 crc kubenswrapper[5016]: I1211 11:14:46.340443 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8gjw" event={"ID":"7fee40af-4c5d-4f07-850c-54fdad7cf8ad","Type":"ContainerStarted","Data":"fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8"} Dec 11 
11:14:46 crc kubenswrapper[5016]: I1211 11:14:46.368703 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-s8gjw" podStartSLOduration=3.59409571 podStartE2EDuration="6.368677485s" podCreationTimestamp="2025-12-11 11:14:40 +0000 UTC" firstStartedPulling="2025-12-11 11:14:42.290846649 +0000 UTC m=+2399.109406228" lastFinishedPulling="2025-12-11 11:14:45.065428414 +0000 UTC m=+2401.883988003" observedRunningTime="2025-12-11 11:14:46.360372971 +0000 UTC m=+2403.178932560" watchObservedRunningTime="2025-12-11 11:14:46.368677485 +0000 UTC m=+2403.187237064" Dec 11 11:14:51 crc kubenswrapper[5016]: I1211 11:14:51.023279 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:51 crc kubenswrapper[5016]: I1211 11:14:51.023766 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:51 crc kubenswrapper[5016]: I1211 11:14:51.084891 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:51 crc kubenswrapper[5016]: I1211 11:14:51.458381 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:51 crc kubenswrapper[5016]: I1211 11:14:51.520138 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s8gjw"] Dec 11 11:14:53 crc kubenswrapper[5016]: I1211 11:14:53.422556 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-s8gjw" podUID="7fee40af-4c5d-4f07-850c-54fdad7cf8ad" containerName="registry-server" containerID="cri-o://fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8" gracePeriod=2 Dec 11 11:14:53 crc kubenswrapper[5016]: I1211 11:14:53.919682 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.023569 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnj8s\" (UniqueName: \"kubernetes.io/projected/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-kube-api-access-hnj8s\") pod \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\" (UID: \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\") " Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.023753 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-catalog-content\") pod \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\" (UID: \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\") " Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.024074 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-utilities\") pod \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\" (UID: \"7fee40af-4c5d-4f07-850c-54fdad7cf8ad\") " Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.025289 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-utilities" (OuterVolumeSpecName: "utilities") pod "7fee40af-4c5d-4f07-850c-54fdad7cf8ad" (UID: "7fee40af-4c5d-4f07-850c-54fdad7cf8ad"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.025478 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.033088 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-kube-api-access-hnj8s" (OuterVolumeSpecName: "kube-api-access-hnj8s") pod "7fee40af-4c5d-4f07-850c-54fdad7cf8ad" (UID: "7fee40af-4c5d-4f07-850c-54fdad7cf8ad"). InnerVolumeSpecName "kube-api-access-hnj8s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.083462 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7fee40af-4c5d-4f07-850c-54fdad7cf8ad" (UID: "7fee40af-4c5d-4f07-850c-54fdad7cf8ad"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.127929 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnj8s\" (UniqueName: \"kubernetes.io/projected/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-kube-api-access-hnj8s\") on node \"crc\" DevicePath \"\"" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.128005 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fee40af-4c5d-4f07-850c-54fdad7cf8ad-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.435318 5016 generic.go:334] "Generic (PLEG): container finished" podID="7fee40af-4c5d-4f07-850c-54fdad7cf8ad" containerID="fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8" exitCode=0 Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.435408 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-s8gjw" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.435397 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8gjw" event={"ID":"7fee40af-4c5d-4f07-850c-54fdad7cf8ad","Type":"ContainerDied","Data":"fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8"} Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.435491 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8gjw" event={"ID":"7fee40af-4c5d-4f07-850c-54fdad7cf8ad","Type":"ContainerDied","Data":"8ce2c1e18a2173e1ba0dc4ffd44704c7c38b1e4dcd26a11ef772243cdb921f95"} Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.435520 5016 scope.go:117] "RemoveContainer" containerID="fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.470483 5016 scope.go:117] "RemoveContainer" containerID="8280c25a7a11f51a6a4b3012251c9d75e5c64b44c9e64f49a32351128856ebdc" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.476857 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s8gjw"] Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.486708 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-s8gjw"] Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.505668 5016 scope.go:117] "RemoveContainer" containerID="823dc2322914f7930357cfcbf5191812510a1c04d253e33856f1cd74af39508e" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.566634 5016 scope.go:117] "RemoveContainer" containerID="fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8" Dec 11 11:14:54 crc kubenswrapper[5016]: E1211 11:14:54.567587 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8\": container with ID starting with fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8 not found: ID does not exist" containerID="fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.567641 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8"} err="failed to get container status \"fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8\": rpc error: code = NotFound desc = could not find container \"fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8\": container with ID starting with fc24c46e45cf6ba45ca889ed078a7c97a51eebb43153611f4be47a72121cdcd8 not found: ID does not exist" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.567677 5016 scope.go:117] "RemoveContainer" containerID="8280c25a7a11f51a6a4b3012251c9d75e5c64b44c9e64f49a32351128856ebdc" Dec 11 11:14:54 crc kubenswrapper[5016]: E1211 11:14:54.568286 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8280c25a7a11f51a6a4b3012251c9d75e5c64b44c9e64f49a32351128856ebdc\": container with ID starting with 8280c25a7a11f51a6a4b3012251c9d75e5c64b44c9e64f49a32351128856ebdc not found: ID does not exist" containerID="8280c25a7a11f51a6a4b3012251c9d75e5c64b44c9e64f49a32351128856ebdc" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.568328 5016 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8280c25a7a11f51a6a4b3012251c9d75e5c64b44c9e64f49a32351128856ebdc"} err="failed to get container status \"8280c25a7a11f51a6a4b3012251c9d75e5c64b44c9e64f49a32351128856ebdc\": rpc error: code = NotFound desc = could not find container \"8280c25a7a11f51a6a4b3012251c9d75e5c64b44c9e64f49a32351128856ebdc\": container with ID starting with 8280c25a7a11f51a6a4b3012251c9d75e5c64b44c9e64f49a32351128856ebdc not found: ID does not exist" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.568358 5016 scope.go:117] "RemoveContainer" containerID="823dc2322914f7930357cfcbf5191812510a1c04d253e33856f1cd74af39508e" Dec 11 11:14:54 crc kubenswrapper[5016]: E1211 11:14:54.568652 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"823dc2322914f7930357cfcbf5191812510a1c04d253e33856f1cd74af39508e\": container with ID starting with 823dc2322914f7930357cfcbf5191812510a1c04d253e33856f1cd74af39508e not found: ID does not exist" containerID="823dc2322914f7930357cfcbf5191812510a1c04d253e33856f1cd74af39508e" Dec 11 11:14:54 crc kubenswrapper[5016]: I1211 11:14:54.568687 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"823dc2322914f7930357cfcbf5191812510a1c04d253e33856f1cd74af39508e"} err="failed to get container status \"823dc2322914f7930357cfcbf5191812510a1c04d253e33856f1cd74af39508e\": rpc error: code = NotFound desc = could not find container \"823dc2322914f7930357cfcbf5191812510a1c04d253e33856f1cd74af39508e\": container with ID starting with 823dc2322914f7930357cfcbf5191812510a1c04d253e33856f1cd74af39508e not found: ID does not exist" Dec 11 11:14:55 crc kubenswrapper[5016]: I1211 11:14:55.487860 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fee40af-4c5d-4f07-850c-54fdad7cf8ad" path="/var/lib/kubelet/pods/7fee40af-4c5d-4f07-850c-54fdad7cf8ad/volumes" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.149129 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk"] Dec 11 11:15:00 crc kubenswrapper[5016]: E1211 11:15:00.150124 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fee40af-4c5d-4f07-850c-54fdad7cf8ad" containerName="extract-utilities" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.150142 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fee40af-4c5d-4f07-850c-54fdad7cf8ad" containerName="extract-utilities" Dec 11 11:15:00 crc kubenswrapper[5016]: E1211 11:15:00.150157 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fee40af-4c5d-4f07-850c-54fdad7cf8ad" containerName="extract-content" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.150166 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fee40af-4c5d-4f07-850c-54fdad7cf8ad" containerName="extract-content" Dec 11 11:15:00 crc kubenswrapper[5016]: E1211 11:15:00.150199 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fee40af-4c5d-4f07-850c-54fdad7cf8ad" containerName="registry-server" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.150207 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fee40af-4c5d-4f07-850c-54fdad7cf8ad" containerName="registry-server" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.150422 5016 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="7fee40af-4c5d-4f07-850c-54fdad7cf8ad" containerName="registry-server" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.151130 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.154802 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.154917 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.164156 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk"] Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.261373 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3629624c-34de-4948-9118-e810f1dcfdd4-secret-volume\") pod \"collect-profiles-29424195-9rxhk\" (UID: \"3629624c-34de-4948-9118-e810f1dcfdd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.261963 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67gm5\" (UniqueName: \"kubernetes.io/projected/3629624c-34de-4948-9118-e810f1dcfdd4-kube-api-access-67gm5\") pod \"collect-profiles-29424195-9rxhk\" (UID: \"3629624c-34de-4948-9118-e810f1dcfdd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.262080 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3629624c-34de-4948-9118-e810f1dcfdd4-config-volume\") pod \"collect-profiles-29424195-9rxhk\" (UID: \"3629624c-34de-4948-9118-e810f1dcfdd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.364458 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67gm5\" (UniqueName: \"kubernetes.io/projected/3629624c-34de-4948-9118-e810f1dcfdd4-kube-api-access-67gm5\") pod \"collect-profiles-29424195-9rxhk\" (UID: \"3629624c-34de-4948-9118-e810f1dcfdd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.364524 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3629624c-34de-4948-9118-e810f1dcfdd4-config-volume\") pod \"collect-profiles-29424195-9rxhk\" (UID: \"3629624c-34de-4948-9118-e810f1dcfdd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.364595 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3629624c-34de-4948-9118-e810f1dcfdd4-secret-volume\") pod \"collect-profiles-29424195-9rxhk\" (UID: \"3629624c-34de-4948-9118-e810f1dcfdd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.365712 
5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3629624c-34de-4948-9118-e810f1dcfdd4-config-volume\") pod \"collect-profiles-29424195-9rxhk\" (UID: \"3629624c-34de-4948-9118-e810f1dcfdd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.370210 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3629624c-34de-4948-9118-e810f1dcfdd4-secret-volume\") pod \"collect-profiles-29424195-9rxhk\" (UID: \"3629624c-34de-4948-9118-e810f1dcfdd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.383667 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67gm5\" (UniqueName: \"kubernetes.io/projected/3629624c-34de-4948-9118-e810f1dcfdd4-kube-api-access-67gm5\") pod \"collect-profiles-29424195-9rxhk\" (UID: \"3629624c-34de-4948-9118-e810f1dcfdd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.491457 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:00 crc kubenswrapper[5016]: I1211 11:15:00.979634 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk"] Dec 11 11:15:01 crc kubenswrapper[5016]: I1211 11:15:01.531057 5016 generic.go:334] "Generic (PLEG): container finished" podID="3629624c-34de-4948-9118-e810f1dcfdd4" containerID="52ff86fe05208b0dc1a4a6bf336d4bcaef312a0d9fc844221a104e86cf4d3acd" exitCode=0 Dec 11 11:15:01 crc kubenswrapper[5016]: I1211 11:15:01.531115 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" event={"ID":"3629624c-34de-4948-9118-e810f1dcfdd4","Type":"ContainerDied","Data":"52ff86fe05208b0dc1a4a6bf336d4bcaef312a0d9fc844221a104e86cf4d3acd"} Dec 11 11:15:01 crc kubenswrapper[5016]: I1211 11:15:01.531416 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" event={"ID":"3629624c-34de-4948-9118-e810f1dcfdd4","Type":"ContainerStarted","Data":"708cfae57870a84226d32386e7b6d39265c6745fc14765a326e3ecddba13ad97"} Dec 11 11:15:02 crc kubenswrapper[5016]: I1211 11:15:02.888279 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.020460 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3629624c-34de-4948-9118-e810f1dcfdd4-secret-volume\") pod \"3629624c-34de-4948-9118-e810f1dcfdd4\" (UID: \"3629624c-34de-4948-9118-e810f1dcfdd4\") " Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.020776 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3629624c-34de-4948-9118-e810f1dcfdd4-config-volume\") pod \"3629624c-34de-4948-9118-e810f1dcfdd4\" (UID: \"3629624c-34de-4948-9118-e810f1dcfdd4\") " Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.020900 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67gm5\" (UniqueName: \"kubernetes.io/projected/3629624c-34de-4948-9118-e810f1dcfdd4-kube-api-access-67gm5\") pod \"3629624c-34de-4948-9118-e810f1dcfdd4\" (UID: \"3629624c-34de-4948-9118-e810f1dcfdd4\") " Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.021494 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3629624c-34de-4948-9118-e810f1dcfdd4-config-volume" (OuterVolumeSpecName: "config-volume") pod "3629624c-34de-4948-9118-e810f1dcfdd4" (UID: "3629624c-34de-4948-9118-e810f1dcfdd4"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.028159 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3629624c-34de-4948-9118-e810f1dcfdd4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3629624c-34de-4948-9118-e810f1dcfdd4" (UID: "3629624c-34de-4948-9118-e810f1dcfdd4"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.036192 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3629624c-34de-4948-9118-e810f1dcfdd4-kube-api-access-67gm5" (OuterVolumeSpecName: "kube-api-access-67gm5") pod "3629624c-34de-4948-9118-e810f1dcfdd4" (UID: "3629624c-34de-4948-9118-e810f1dcfdd4"). InnerVolumeSpecName "kube-api-access-67gm5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.123521 5016 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3629624c-34de-4948-9118-e810f1dcfdd4-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.123571 5016 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3629624c-34de-4948-9118-e810f1dcfdd4-config-volume\") on node \"crc\" DevicePath \"\"" Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.123584 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67gm5\" (UniqueName: \"kubernetes.io/projected/3629624c-34de-4948-9118-e810f1dcfdd4-kube-api-access-67gm5\") on node \"crc\" DevicePath \"\"" Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.558062 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" event={"ID":"3629624c-34de-4948-9118-e810f1dcfdd4","Type":"ContainerDied","Data":"708cfae57870a84226d32386e7b6d39265c6745fc14765a326e3ecddba13ad97"} Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.558374 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="708cfae57870a84226d32386e7b6d39265c6745fc14765a326e3ecddba13ad97" Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.558131 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424195-9rxhk" Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.972731 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp"] Dec 11 11:15:03 crc kubenswrapper[5016]: I1211 11:15:03.981588 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424150-t79rp"] Dec 11 11:15:05 crc kubenswrapper[5016]: I1211 11:15:05.493014 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67dd196e-3271-4222-aa21-dfaf3278eee0" path="/var/lib/kubelet/pods/67dd196e-3271-4222-aa21-dfaf3278eee0/volumes" Dec 11 11:15:12 crc kubenswrapper[5016]: I1211 11:15:12.065907 5016 scope.go:117] "RemoveContainer" containerID="a935f10f6a83a521998e85aff4db8e97b019c18856bc6745278ee7decbad7bcf" Dec 11 11:15:12 crc kubenswrapper[5016]: I1211 11:15:12.933144 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:15:12 crc kubenswrapper[5016]: I1211 11:15:12.933565 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:15:12 crc kubenswrapper[5016]: I1211 11:15:12.933660 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 11:15:12 crc kubenswrapper[5016]: I1211 11:15:12.934832 5016 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 11:15:12 crc kubenswrapper[5016]: I1211 11:15:12.934935 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" gracePeriod=600 Dec 11 11:15:13 crc kubenswrapper[5016]: E1211 11:15:13.059524 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:15:13 crc kubenswrapper[5016]: I1211 11:15:13.653850 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" exitCode=0 Dec 11 11:15:13 crc kubenswrapper[5016]: I1211 11:15:13.653959 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d"} Dec 11 11:15:13 crc kubenswrapper[5016]: I1211 11:15:13.654258 5016 scope.go:117] "RemoveContainer" containerID="745e2ce5229d52e8fc8364c6aec4dc08214336fac2ba63dc453c716f42c02e8d" Dec 11 11:15:13 crc kubenswrapper[5016]: I1211 11:15:13.655156 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:15:13 crc kubenswrapper[5016]: E1211 11:15:13.655491 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:15:26 crc kubenswrapper[5016]: I1211 11:15:26.474466 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:15:26 crc kubenswrapper[5016]: E1211 11:15:26.475496 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:15:41 crc kubenswrapper[5016]: I1211 11:15:41.475255 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:15:41 crc kubenswrapper[5016]: E1211 11:15:41.476161 5016 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:15:56 crc kubenswrapper[5016]: I1211 11:15:56.474998 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:15:56 crc kubenswrapper[5016]: E1211 11:15:56.475731 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:16:07 crc kubenswrapper[5016]: I1211 11:16:07.475215 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:16:07 crc kubenswrapper[5016]: E1211 11:16:07.476448 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:16:20 crc kubenswrapper[5016]: I1211 11:16:20.475197 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:16:20 crc kubenswrapper[5016]: E1211 11:16:20.477210 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:16:31 crc kubenswrapper[5016]: I1211 11:16:31.474756 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:16:31 crc kubenswrapper[5016]: E1211 11:16:31.475541 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:16:45 crc kubenswrapper[5016]: I1211 11:16:45.474663 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:16:45 crc kubenswrapper[5016]: E1211 11:16:45.475517 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:16:57 crc kubenswrapper[5016]: I1211 11:16:57.474926 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:16:57 crc kubenswrapper[5016]: E1211 11:16:57.475675 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:17:09 crc kubenswrapper[5016]: I1211 11:17:09.474651 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:17:09 crc kubenswrapper[5016]: E1211 11:17:09.475467 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:17:24 crc kubenswrapper[5016]: I1211 11:17:24.474564 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:17:24 crc kubenswrapper[5016]: E1211 11:17:24.475521 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:17:38 crc kubenswrapper[5016]: I1211 11:17:38.474444 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:17:38 crc kubenswrapper[5016]: E1211 11:17:38.475309 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:17:53 crc kubenswrapper[5016]: I1211 11:17:53.482669 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:17:53 crc kubenswrapper[5016]: E1211 11:17:53.483917 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:18:07 crc kubenswrapper[5016]: I1211 11:18:07.475493 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:18:07 crc kubenswrapper[5016]: E1211 11:18:07.476471 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:18:19 crc kubenswrapper[5016]: I1211 11:18:19.476356 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:18:19 crc kubenswrapper[5016]: E1211 11:18:19.478501 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:18:20 crc kubenswrapper[5016]: I1211 11:18:20.553353 5016 generic.go:334] "Generic (PLEG): container finished" podID="ff52a65c-c0b6-4d71-8038-b8c079cd1d64" containerID="20b66f7f30e168d1c3da72148f46a8d8168a72170742f3ac64ebb40d96c4b24b" exitCode=0 Dec 11 11:18:20 crc kubenswrapper[5016]: I1211 11:18:20.553435 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" event={"ID":"ff52a65c-c0b6-4d71-8038-b8c079cd1d64","Type":"ContainerDied","Data":"20b66f7f30e168d1c3da72148f46a8d8168a72170742f3ac64ebb40d96c4b24b"} Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.045123 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.163221 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-inventory\") pod \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.163276 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-libvirt-combined-ca-bundle\") pod \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.163310 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzwjj\" (UniqueName: \"kubernetes.io/projected/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-kube-api-access-rzwjj\") pod \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.163356 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-ssh-key\") pod \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.163388 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-libvirt-secret-0\") pod \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\" (UID: \"ff52a65c-c0b6-4d71-8038-b8c079cd1d64\") " Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.172051 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "ff52a65c-c0b6-4d71-8038-b8c079cd1d64" (UID: "ff52a65c-c0b6-4d71-8038-b8c079cd1d64"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.172416 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-kube-api-access-rzwjj" (OuterVolumeSpecName: "kube-api-access-rzwjj") pod "ff52a65c-c0b6-4d71-8038-b8c079cd1d64" (UID: "ff52a65c-c0b6-4d71-8038-b8c079cd1d64"). InnerVolumeSpecName "kube-api-access-rzwjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.198132 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "ff52a65c-c0b6-4d71-8038-b8c079cd1d64" (UID: "ff52a65c-c0b6-4d71-8038-b8c079cd1d64"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.200833 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-inventory" (OuterVolumeSpecName: "inventory") pod "ff52a65c-c0b6-4d71-8038-b8c079cd1d64" (UID: "ff52a65c-c0b6-4d71-8038-b8c079cd1d64"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.204693 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ff52a65c-c0b6-4d71-8038-b8c079cd1d64" (UID: "ff52a65c-c0b6-4d71-8038-b8c079cd1d64"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.266823 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.266872 5016 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.266929 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzwjj\" (UniqueName: \"kubernetes.io/projected/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-kube-api-access-rzwjj\") on node \"crc\" DevicePath \"\"" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.266967 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.266984 5016 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ff52a65c-c0b6-4d71-8038-b8c079cd1d64-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.578871 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" event={"ID":"ff52a65c-c0b6-4d71-8038-b8c079cd1d64","Type":"ContainerDied","Data":"86a2db051237f9f584cdaaa5e547b2b989ac58ff0023074907961f081879b135"} Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.578928 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86a2db051237f9f584cdaaa5e547b2b989ac58ff0023074907961f081879b135" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.578983 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.692833 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl"] Dec 11 11:18:22 crc kubenswrapper[5016]: E1211 11:18:22.694802 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3629624c-34de-4948-9118-e810f1dcfdd4" containerName="collect-profiles" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.694824 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="3629624c-34de-4948-9118-e810f1dcfdd4" containerName="collect-profiles" Dec 11 11:18:22 crc kubenswrapper[5016]: E1211 11:18:22.694866 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff52a65c-c0b6-4d71-8038-b8c079cd1d64" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.694875 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff52a65c-c0b6-4d71-8038-b8c079cd1d64" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.695114 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="3629624c-34de-4948-9118-e810f1dcfdd4" containerName="collect-profiles" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.695130 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff52a65c-c0b6-4d71-8038-b8c079cd1d64" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.695846 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.699282 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.699484 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.699539 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.699578 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.699501 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.699682 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.699503 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.711458 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl"] Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.879789 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: 
\"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.879882 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.879927 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.880015 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.880332 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.880664 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.881754 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77l45\" (UniqueName: \"kubernetes.io/projected/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-kube-api-access-77l45\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.881907 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.882121 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.983978 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.984028 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.984094 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.984139 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77l45\" (UniqueName: \"kubernetes.io/projected/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-kube-api-access-77l45\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.984180 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.984226 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.984279 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.984333 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-cell1-compute-config-1\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.984369 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.988972 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.991612 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.995854 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:22 crc kubenswrapper[5016]: I1211 11:18:22.996658 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:23 crc kubenswrapper[5016]: I1211 11:18:23.000779 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:23 crc kubenswrapper[5016]: I1211 11:18:23.004173 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:23 crc kubenswrapper[5016]: I1211 11:18:23.006739 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:23 crc kubenswrapper[5016]: I1211 11:18:23.010180 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:23 crc kubenswrapper[5016]: I1211 11:18:23.031509 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77l45\" (UniqueName: \"kubernetes.io/projected/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-kube-api-access-77l45\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kwsxl\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:23 crc kubenswrapper[5016]: I1211 11:18:23.323491 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:18:23 crc kubenswrapper[5016]: I1211 11:18:23.824192 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl"] Dec 11 11:18:23 crc kubenswrapper[5016]: I1211 11:18:23.830854 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 11:18:24 crc kubenswrapper[5016]: I1211 11:18:24.603152 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" event={"ID":"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6","Type":"ContainerStarted","Data":"24d59a6e0a0d8f5db1789e6fde8a6d85ed4d850b2dfae24bf07d8b0fe1810065"} Dec 11 11:18:24 crc kubenswrapper[5016]: I1211 11:18:24.604732 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" event={"ID":"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6","Type":"ContainerStarted","Data":"e347f2f2236a9d913c80b97be6711cc0f6f0806940578df520cfb52e4ae70bab"} Dec 11 11:18:24 crc kubenswrapper[5016]: I1211 11:18:24.638094 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" podStartSLOduration=2.198881927 podStartE2EDuration="2.638064907s" podCreationTimestamp="2025-12-11 11:18:22 +0000 UTC" firstStartedPulling="2025-12-11 11:18:23.830495238 +0000 UTC m=+2620.649054827" lastFinishedPulling="2025-12-11 11:18:24.269678228 +0000 UTC m=+2621.088237807" observedRunningTime="2025-12-11 11:18:24.631126886 +0000 UTC m=+2621.449686485" watchObservedRunningTime="2025-12-11 11:18:24.638064907 +0000 UTC m=+2621.456624506" Dec 11 11:18:32 crc kubenswrapper[5016]: I1211 11:18:32.474930 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:18:32 crc kubenswrapper[5016]: E1211 11:18:32.475856 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:18:46 crc kubenswrapper[5016]: I1211 11:18:46.475219 5016 scope.go:117] 
"RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:18:46 crc kubenswrapper[5016]: E1211 11:18:46.476785 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:18:58 crc kubenswrapper[5016]: I1211 11:18:58.474512 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:18:58 crc kubenswrapper[5016]: E1211 11:18:58.475331 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:19:12 crc kubenswrapper[5016]: I1211 11:19:12.475214 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:19:12 crc kubenswrapper[5016]: E1211 11:19:12.476053 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:19:26 crc kubenswrapper[5016]: I1211 11:19:26.474904 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:19:26 crc kubenswrapper[5016]: E1211 11:19:26.475764 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:19:37 crc kubenswrapper[5016]: I1211 11:19:37.476525 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:19:37 crc kubenswrapper[5016]: E1211 11:19:37.477355 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:19:51 crc kubenswrapper[5016]: I1211 11:19:51.475044 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:19:51 crc kubenswrapper[5016]: E1211 11:19:51.476448 5016 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:20:03 crc kubenswrapper[5016]: I1211 11:20:03.481074 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:20:03 crc kubenswrapper[5016]: E1211 11:20:03.481838 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:20:17 crc kubenswrapper[5016]: I1211 11:20:17.476043 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:20:17 crc kubenswrapper[5016]: I1211 11:20:17.801153 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"4b2dcd166d06f6950e9f92f680a9105092928ed7c54642b432cb05388ec02325"} Dec 11 11:20:48 crc kubenswrapper[5016]: I1211 11:20:48.803323 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ftcxf"] Dec 11 11:20:48 crc kubenswrapper[5016]: I1211 11:20:48.805927 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:48 crc kubenswrapper[5016]: I1211 11:20:48.812361 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ftcxf"] Dec 11 11:20:48 crc kubenswrapper[5016]: I1211 11:20:48.841884 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k67ns\" (UniqueName: \"kubernetes.io/projected/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-kube-api-access-k67ns\") pod \"redhat-marketplace-ftcxf\" (UID: \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\") " pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:48 crc kubenswrapper[5016]: I1211 11:20:48.841956 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-catalog-content\") pod \"redhat-marketplace-ftcxf\" (UID: \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\") " pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:48 crc kubenswrapper[5016]: I1211 11:20:48.842020 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-utilities\") pod \"redhat-marketplace-ftcxf\" (UID: \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\") " pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:48 crc kubenswrapper[5016]: I1211 11:20:48.943591 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-utilities\") pod \"redhat-marketplace-ftcxf\" (UID: \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\") " pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:48 crc kubenswrapper[5016]: I1211 11:20:48.943786 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k67ns\" (UniqueName: \"kubernetes.io/projected/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-kube-api-access-k67ns\") pod \"redhat-marketplace-ftcxf\" (UID: \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\") " pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:48 crc kubenswrapper[5016]: I1211 11:20:48.943840 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-catalog-content\") pod \"redhat-marketplace-ftcxf\" (UID: \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\") " pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:48 crc kubenswrapper[5016]: I1211 11:20:48.944348 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-utilities\") pod \"redhat-marketplace-ftcxf\" (UID: \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\") " pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:48 crc kubenswrapper[5016]: I1211 11:20:48.944432 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-catalog-content\") pod \"redhat-marketplace-ftcxf\" (UID: \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\") " pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:48 crc kubenswrapper[5016]: I1211 11:20:48.974176 5016 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-k67ns\" (UniqueName: \"kubernetes.io/projected/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-kube-api-access-k67ns\") pod \"redhat-marketplace-ftcxf\" (UID: \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\") " pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:49 crc kubenswrapper[5016]: I1211 11:20:49.138221 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:49 crc kubenswrapper[5016]: I1211 11:20:49.625763 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ftcxf"] Dec 11 11:20:50 crc kubenswrapper[5016]: I1211 11:20:50.108492 5016 generic.go:334] "Generic (PLEG): container finished" podID="5a599a8c-69a4-4202-9342-f7e7d40ca6ed" containerID="4a797248294579d4c1ec81225e8392fc931707e9595ff5834957dd7084af679d" exitCode=0 Dec 11 11:20:50 crc kubenswrapper[5016]: I1211 11:20:50.108607 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ftcxf" event={"ID":"5a599a8c-69a4-4202-9342-f7e7d40ca6ed","Type":"ContainerDied","Data":"4a797248294579d4c1ec81225e8392fc931707e9595ff5834957dd7084af679d"} Dec 11 11:20:50 crc kubenswrapper[5016]: I1211 11:20:50.108772 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ftcxf" event={"ID":"5a599a8c-69a4-4202-9342-f7e7d40ca6ed","Type":"ContainerStarted","Data":"84ad2e165fc12219739516480b4015e8e6a1508dd006ca302a040fac57a6053c"} Dec 11 11:20:52 crc kubenswrapper[5016]: I1211 11:20:52.130773 5016 generic.go:334] "Generic (PLEG): container finished" podID="5a599a8c-69a4-4202-9342-f7e7d40ca6ed" containerID="e751d9d4f74d5e8c6736b4f7c57262d10aba7d82543f078e28574450d0648d84" exitCode=0 Dec 11 11:20:52 crc kubenswrapper[5016]: I1211 11:20:52.130863 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ftcxf" event={"ID":"5a599a8c-69a4-4202-9342-f7e7d40ca6ed","Type":"ContainerDied","Data":"e751d9d4f74d5e8c6736b4f7c57262d10aba7d82543f078e28574450d0648d84"} Dec 11 11:20:53 crc kubenswrapper[5016]: I1211 11:20:53.144486 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ftcxf" event={"ID":"5a599a8c-69a4-4202-9342-f7e7d40ca6ed","Type":"ContainerStarted","Data":"73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c"} Dec 11 11:20:53 crc kubenswrapper[5016]: I1211 11:20:53.167532 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ftcxf" podStartSLOduration=2.585686419 podStartE2EDuration="5.167497167s" podCreationTimestamp="2025-12-11 11:20:48 +0000 UTC" firstStartedPulling="2025-12-11 11:20:50.111178571 +0000 UTC m=+2766.929738150" lastFinishedPulling="2025-12-11 11:20:52.692989319 +0000 UTC m=+2769.511548898" observedRunningTime="2025-12-11 11:20:53.162817692 +0000 UTC m=+2769.981377291" watchObservedRunningTime="2025-12-11 11:20:53.167497167 +0000 UTC m=+2769.986056766" Dec 11 11:20:59 crc kubenswrapper[5016]: I1211 11:20:59.139466 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:59 crc kubenswrapper[5016]: I1211 11:20:59.140078 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:59 crc kubenswrapper[5016]: I1211 11:20:59.189293 5016 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:59 crc kubenswrapper[5016]: I1211 11:20:59.265583 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:20:59 crc kubenswrapper[5016]: I1211 11:20:59.433051 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ftcxf"] Dec 11 11:21:01 crc kubenswrapper[5016]: I1211 11:21:01.218660 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ftcxf" podUID="5a599a8c-69a4-4202-9342-f7e7d40ca6ed" containerName="registry-server" containerID="cri-o://73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c" gracePeriod=2 Dec 11 11:21:01 crc kubenswrapper[5016]: I1211 11:21:01.660149 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:21:01 crc kubenswrapper[5016]: I1211 11:21:01.821009 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-catalog-content\") pod \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\" (UID: \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\") " Dec 11 11:21:01 crc kubenswrapper[5016]: I1211 11:21:01.821136 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k67ns\" (UniqueName: \"kubernetes.io/projected/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-kube-api-access-k67ns\") pod \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\" (UID: \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\") " Dec 11 11:21:01 crc kubenswrapper[5016]: I1211 11:21:01.821403 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-utilities\") pod \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\" (UID: \"5a599a8c-69a4-4202-9342-f7e7d40ca6ed\") " Dec 11 11:21:01 crc kubenswrapper[5016]: I1211 11:21:01.822334 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-utilities" (OuterVolumeSpecName: "utilities") pod "5a599a8c-69a4-4202-9342-f7e7d40ca6ed" (UID: "5a599a8c-69a4-4202-9342-f7e7d40ca6ed"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:21:01 crc kubenswrapper[5016]: I1211 11:21:01.830047 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-kube-api-access-k67ns" (OuterVolumeSpecName: "kube-api-access-k67ns") pod "5a599a8c-69a4-4202-9342-f7e7d40ca6ed" (UID: "5a599a8c-69a4-4202-9342-f7e7d40ca6ed"). InnerVolumeSpecName "kube-api-access-k67ns". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:21:01 crc kubenswrapper[5016]: I1211 11:21:01.845456 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5a599a8c-69a4-4202-9342-f7e7d40ca6ed" (UID: "5a599a8c-69a4-4202-9342-f7e7d40ca6ed"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:21:01 crc kubenswrapper[5016]: I1211 11:21:01.924700 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:21:01 crc kubenswrapper[5016]: I1211 11:21:01.925040 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:21:01 crc kubenswrapper[5016]: I1211 11:21:01.925059 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k67ns\" (UniqueName: \"kubernetes.io/projected/5a599a8c-69a4-4202-9342-f7e7d40ca6ed-kube-api-access-k67ns\") on node \"crc\" DevicePath \"\"" Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.233504 5016 generic.go:334] "Generic (PLEG): container finished" podID="5a599a8c-69a4-4202-9342-f7e7d40ca6ed" containerID="73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c" exitCode=0 Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.233554 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ftcxf" event={"ID":"5a599a8c-69a4-4202-9342-f7e7d40ca6ed","Type":"ContainerDied","Data":"73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c"} Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.233601 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ftcxf" Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.233635 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ftcxf" event={"ID":"5a599a8c-69a4-4202-9342-f7e7d40ca6ed","Type":"ContainerDied","Data":"84ad2e165fc12219739516480b4015e8e6a1508dd006ca302a040fac57a6053c"} Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.233668 5016 scope.go:117] "RemoveContainer" containerID="73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c" Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.260509 5016 scope.go:117] "RemoveContainer" containerID="e751d9d4f74d5e8c6736b4f7c57262d10aba7d82543f078e28574450d0648d84" Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.283044 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ftcxf"] Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.290994 5016 scope.go:117] "RemoveContainer" containerID="4a797248294579d4c1ec81225e8392fc931707e9595ff5834957dd7084af679d" Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.291732 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ftcxf"] Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.341569 5016 scope.go:117] "RemoveContainer" containerID="73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c" Dec 11 11:21:02 crc kubenswrapper[5016]: E1211 11:21:02.342150 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c\": container with ID starting with 73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c not found: ID does not exist" containerID="73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c" Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.342269 5016 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c"} err="failed to get container status \"73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c\": rpc error: code = NotFound desc = could not find container \"73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c\": container with ID starting with 73c5607c399162828616dfc21a56cf506a03f9c746a14e7b8105f0ac1299aa1c not found: ID does not exist" Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.342374 5016 scope.go:117] "RemoveContainer" containerID="e751d9d4f74d5e8c6736b4f7c57262d10aba7d82543f078e28574450d0648d84" Dec 11 11:21:02 crc kubenswrapper[5016]: E1211 11:21:02.342857 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e751d9d4f74d5e8c6736b4f7c57262d10aba7d82543f078e28574450d0648d84\": container with ID starting with e751d9d4f74d5e8c6736b4f7c57262d10aba7d82543f078e28574450d0648d84 not found: ID does not exist" containerID="e751d9d4f74d5e8c6736b4f7c57262d10aba7d82543f078e28574450d0648d84" Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.342917 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e751d9d4f74d5e8c6736b4f7c57262d10aba7d82543f078e28574450d0648d84"} err="failed to get container status \"e751d9d4f74d5e8c6736b4f7c57262d10aba7d82543f078e28574450d0648d84\": rpc error: code = NotFound desc = could not find container \"e751d9d4f74d5e8c6736b4f7c57262d10aba7d82543f078e28574450d0648d84\": container with ID starting with e751d9d4f74d5e8c6736b4f7c57262d10aba7d82543f078e28574450d0648d84 not found: ID does not exist" Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.342991 5016 scope.go:117] "RemoveContainer" containerID="4a797248294579d4c1ec81225e8392fc931707e9595ff5834957dd7084af679d" Dec 11 11:21:02 crc kubenswrapper[5016]: E1211 11:21:02.343318 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a797248294579d4c1ec81225e8392fc931707e9595ff5834957dd7084af679d\": container with ID starting with 4a797248294579d4c1ec81225e8392fc931707e9595ff5834957dd7084af679d not found: ID does not exist" containerID="4a797248294579d4c1ec81225e8392fc931707e9595ff5834957dd7084af679d" Dec 11 11:21:02 crc kubenswrapper[5016]: I1211 11:21:02.343361 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a797248294579d4c1ec81225e8392fc931707e9595ff5834957dd7084af679d"} err="failed to get container status \"4a797248294579d4c1ec81225e8392fc931707e9595ff5834957dd7084af679d\": rpc error: code = NotFound desc = could not find container \"4a797248294579d4c1ec81225e8392fc931707e9595ff5834957dd7084af679d\": container with ID starting with 4a797248294579d4c1ec81225e8392fc931707e9595ff5834957dd7084af679d not found: ID does not exist" Dec 11 11:21:03 crc kubenswrapper[5016]: I1211 11:21:03.484922 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a599a8c-69a4-4202-9342-f7e7d40ca6ed" path="/var/lib/kubelet/pods/5a599a8c-69a4-4202-9342-f7e7d40ca6ed/volumes" Dec 11 11:21:04 crc kubenswrapper[5016]: I1211 11:21:04.259023 5016 generic.go:334] "Generic (PLEG): container finished" podID="2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" containerID="24d59a6e0a0d8f5db1789e6fde8a6d85ed4d850b2dfae24bf07d8b0fe1810065" exitCode=0 Dec 11 11:21:04 crc kubenswrapper[5016]: I1211 
11:21:04.259092 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" event={"ID":"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6","Type":"ContainerDied","Data":"24d59a6e0a0d8f5db1789e6fde8a6d85ed4d850b2dfae24bf07d8b0fe1810065"} Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.691996 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.815783 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-migration-ssh-key-1\") pod \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.815889 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-extra-config-0\") pod \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.815922 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-combined-ca-bundle\") pod \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.815985 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-cell1-compute-config-1\") pod \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.816060 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-migration-ssh-key-0\") pod \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.816090 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-ssh-key\") pod \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.816143 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-cell1-compute-config-0\") pod \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.816234 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77l45\" (UniqueName: \"kubernetes.io/projected/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-kube-api-access-77l45\") pod \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.816272 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-inventory\") pod \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\" (UID: \"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6\") " Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.823735 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" (UID: "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.843414 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-kube-api-access-77l45" (OuterVolumeSpecName: "kube-api-access-77l45") pod "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" (UID: "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6"). InnerVolumeSpecName "kube-api-access-77l45". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.851175 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" (UID: "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.857772 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" (UID: "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.867469 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" (UID: "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.867571 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" (UID: "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.867851 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" (UID: "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.869048 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-inventory" (OuterVolumeSpecName: "inventory") pod "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" (UID: "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.876729 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" (UID: "2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.918326 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.918367 5016 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.918382 5016 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.918393 5016 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.918403 5016 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.918414 5016 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.918424 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.918433 5016 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:21:05 crc kubenswrapper[5016]: I1211 11:21:05.918445 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77l45\" (UniqueName: \"kubernetes.io/projected/2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6-kube-api-access-77l45\") on node \"crc\" DevicePath \"\"" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.278278 5016 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" event={"ID":"2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6","Type":"ContainerDied","Data":"e347f2f2236a9d913c80b97be6711cc0f6f0806940578df520cfb52e4ae70bab"} Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.278629 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e347f2f2236a9d913c80b97be6711cc0f6f0806940578df520cfb52e4ae70bab" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.278311 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kwsxl" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.385231 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr"] Dec 11 11:21:06 crc kubenswrapper[5016]: E1211 11:21:06.385838 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a599a8c-69a4-4202-9342-f7e7d40ca6ed" containerName="extract-content" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.385864 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a599a8c-69a4-4202-9342-f7e7d40ca6ed" containerName="extract-content" Dec 11 11:21:06 crc kubenswrapper[5016]: E1211 11:21:06.385891 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a599a8c-69a4-4202-9342-f7e7d40ca6ed" containerName="registry-server" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.385898 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a599a8c-69a4-4202-9342-f7e7d40ca6ed" containerName="registry-server" Dec 11 11:21:06 crc kubenswrapper[5016]: E1211 11:21:06.385916 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" containerName="nova-edpm-deployment-openstack-edpm-ipam" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.385924 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" containerName="nova-edpm-deployment-openstack-edpm-ipam" Dec 11 11:21:06 crc kubenswrapper[5016]: E1211 11:21:06.385941 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a599a8c-69a4-4202-9342-f7e7d40ca6ed" containerName="extract-utilities" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.385949 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a599a8c-69a4-4202-9342-f7e7d40ca6ed" containerName="extract-utilities" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.386210 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6" containerName="nova-edpm-deployment-openstack-edpm-ipam" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.386250 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a599a8c-69a4-4202-9342-f7e7d40ca6ed" containerName="registry-server" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.387091 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.391740 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.392418 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.392451 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.392477 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.392529 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-r5nb8" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.404779 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr"] Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.530636 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.530693 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65zmg\" (UniqueName: \"kubernetes.io/projected/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-kube-api-access-65zmg\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.530761 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.530873 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.531081 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc 
kubenswrapper[5016]: I1211 11:21:06.531111 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.531137 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.634208 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.634264 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.634292 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.634458 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.634712 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65zmg\" (UniqueName: \"kubernetes.io/projected/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-kube-api-access-65zmg\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.634804 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.634840 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.639861 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.640257 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.640545 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.641133 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.642085 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.645235 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.654796 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65zmg\" (UniqueName: \"kubernetes.io/projected/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-kube-api-access-65zmg\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr\" (UID: 
\"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:06 crc kubenswrapper[5016]: I1211 11:21:06.710108 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:21:07 crc kubenswrapper[5016]: I1211 11:21:07.321917 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr"] Dec 11 11:21:08 crc kubenswrapper[5016]: I1211 11:21:08.330510 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" event={"ID":"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a","Type":"ContainerStarted","Data":"7cf644e4f479eeeff0967e3c50a5a64cf66000b4ec05600db242494108620b71"} Dec 11 11:21:08 crc kubenswrapper[5016]: I1211 11:21:08.331854 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" event={"ID":"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a","Type":"ContainerStarted","Data":"345daf61efd5d06cd820414199b09b1a1798cbecda3680093394196eb4607af7"} Dec 11 11:21:08 crc kubenswrapper[5016]: I1211 11:21:08.354014 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" podStartSLOduration=1.8225318590000001 podStartE2EDuration="2.353970157s" podCreationTimestamp="2025-12-11 11:21:06 +0000 UTC" firstStartedPulling="2025-12-11 11:21:07.29998765 +0000 UTC m=+2784.118547229" lastFinishedPulling="2025-12-11 11:21:07.831425948 +0000 UTC m=+2784.649985527" observedRunningTime="2025-12-11 11:21:08.348118963 +0000 UTC m=+2785.166678562" watchObservedRunningTime="2025-12-11 11:21:08.353970157 +0000 UTC m=+2785.172529736" Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.177175 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l6bqr"] Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.180288 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.212611 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l6bqr"] Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.237800 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a30dded-eec4-4ced-92e2-bdc3209447aa-utilities\") pod \"certified-operators-l6bqr\" (UID: \"0a30dded-eec4-4ced-92e2-bdc3209447aa\") " pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.237985 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a30dded-eec4-4ced-92e2-bdc3209447aa-catalog-content\") pod \"certified-operators-l6bqr\" (UID: \"0a30dded-eec4-4ced-92e2-bdc3209447aa\") " pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.238129 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8grtw\" (UniqueName: \"kubernetes.io/projected/0a30dded-eec4-4ced-92e2-bdc3209447aa-kube-api-access-8grtw\") pod \"certified-operators-l6bqr\" (UID: \"0a30dded-eec4-4ced-92e2-bdc3209447aa\") " pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.339541 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a30dded-eec4-4ced-92e2-bdc3209447aa-catalog-content\") pod \"certified-operators-l6bqr\" (UID: \"0a30dded-eec4-4ced-92e2-bdc3209447aa\") " pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.339978 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8grtw\" (UniqueName: \"kubernetes.io/projected/0a30dded-eec4-4ced-92e2-bdc3209447aa-kube-api-access-8grtw\") pod \"certified-operators-l6bqr\" (UID: \"0a30dded-eec4-4ced-92e2-bdc3209447aa\") " pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.340151 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a30dded-eec4-4ced-92e2-bdc3209447aa-utilities\") pod \"certified-operators-l6bqr\" (UID: \"0a30dded-eec4-4ced-92e2-bdc3209447aa\") " pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.340813 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a30dded-eec4-4ced-92e2-bdc3209447aa-catalog-content\") pod \"certified-operators-l6bqr\" (UID: \"0a30dded-eec4-4ced-92e2-bdc3209447aa\") " pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.341011 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a30dded-eec4-4ced-92e2-bdc3209447aa-utilities\") pod \"certified-operators-l6bqr\" (UID: \"0a30dded-eec4-4ced-92e2-bdc3209447aa\") " pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.365586 5016 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-8grtw\" (UniqueName: \"kubernetes.io/projected/0a30dded-eec4-4ced-92e2-bdc3209447aa-kube-api-access-8grtw\") pod \"certified-operators-l6bqr\" (UID: \"0a30dded-eec4-4ced-92e2-bdc3209447aa\") " pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:10 crc kubenswrapper[5016]: I1211 11:22:10.504521 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:11 crc kubenswrapper[5016]: I1211 11:22:11.094807 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l6bqr"] Dec 11 11:22:11 crc kubenswrapper[5016]: W1211 11:22:11.119218 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a30dded_eec4_4ced_92e2_bdc3209447aa.slice/crio-68d784df952da44057b23e7a0a26d53b38b416f9e2b1c3d5d3ba716ffaf7ccd2 WatchSource:0}: Error finding container 68d784df952da44057b23e7a0a26d53b38b416f9e2b1c3d5d3ba716ffaf7ccd2: Status 404 returned error can't find the container with id 68d784df952da44057b23e7a0a26d53b38b416f9e2b1c3d5d3ba716ffaf7ccd2 Dec 11 11:22:12 crc kubenswrapper[5016]: I1211 11:22:12.120039 5016 generic.go:334] "Generic (PLEG): container finished" podID="0a30dded-eec4-4ced-92e2-bdc3209447aa" containerID="a41da76bff09d4c223f030a1219c7a51ebfa9ff99510e4b7c248ebed7e156c48" exitCode=0 Dec 11 11:22:12 crc kubenswrapper[5016]: I1211 11:22:12.120158 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l6bqr" event={"ID":"0a30dded-eec4-4ced-92e2-bdc3209447aa","Type":"ContainerDied","Data":"a41da76bff09d4c223f030a1219c7a51ebfa9ff99510e4b7c248ebed7e156c48"} Dec 11 11:22:12 crc kubenswrapper[5016]: I1211 11:22:12.120366 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l6bqr" event={"ID":"0a30dded-eec4-4ced-92e2-bdc3209447aa","Type":"ContainerStarted","Data":"68d784df952da44057b23e7a0a26d53b38b416f9e2b1c3d5d3ba716ffaf7ccd2"} Dec 11 11:22:18 crc kubenswrapper[5016]: I1211 11:22:18.180563 5016 generic.go:334] "Generic (PLEG): container finished" podID="0a30dded-eec4-4ced-92e2-bdc3209447aa" containerID="f2db2b8d7a857aad5017a028f0e6d3735c5a9279974836827ba97c5c21aa77d0" exitCode=0 Dec 11 11:22:18 crc kubenswrapper[5016]: I1211 11:22:18.180678 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l6bqr" event={"ID":"0a30dded-eec4-4ced-92e2-bdc3209447aa","Type":"ContainerDied","Data":"f2db2b8d7a857aad5017a028f0e6d3735c5a9279974836827ba97c5c21aa77d0"} Dec 11 11:22:19 crc kubenswrapper[5016]: I1211 11:22:19.194404 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l6bqr" event={"ID":"0a30dded-eec4-4ced-92e2-bdc3209447aa","Type":"ContainerStarted","Data":"265ccce149d4ff9332fd284cffc0de8d92edbc21231c98e202a77845b32e73e4"} Dec 11 11:22:19 crc kubenswrapper[5016]: I1211 11:22:19.219365 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l6bqr" podStartSLOduration=2.456556873 podStartE2EDuration="9.219335171s" podCreationTimestamp="2025-12-11 11:22:10 +0000 UTC" firstStartedPulling="2025-12-11 11:22:12.122422156 +0000 UTC m=+2848.940981735" lastFinishedPulling="2025-12-11 11:22:18.885200454 +0000 UTC m=+2855.703760033" observedRunningTime="2025-12-11 11:22:19.2123965 +0000 UTC 
m=+2856.030956099" watchObservedRunningTime="2025-12-11 11:22:19.219335171 +0000 UTC m=+2856.037894770" Dec 11 11:22:20 crc kubenswrapper[5016]: I1211 11:22:20.505562 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:20 crc kubenswrapper[5016]: I1211 11:22:20.505848 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:21 crc kubenswrapper[5016]: I1211 11:22:21.553869 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-l6bqr" podUID="0a30dded-eec4-4ced-92e2-bdc3209447aa" containerName="registry-server" probeResult="failure" output=< Dec 11 11:22:21 crc kubenswrapper[5016]: timeout: failed to connect service ":50051" within 1s Dec 11 11:22:21 crc kubenswrapper[5016]: > Dec 11 11:22:30 crc kubenswrapper[5016]: I1211 11:22:30.554457 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:30 crc kubenswrapper[5016]: I1211 11:22:30.617988 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l6bqr" Dec 11 11:22:30 crc kubenswrapper[5016]: I1211 11:22:30.684731 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l6bqr"] Dec 11 11:22:30 crc kubenswrapper[5016]: I1211 11:22:30.792724 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qgqq9"] Dec 11 11:22:30 crc kubenswrapper[5016]: I1211 11:22:30.793066 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qgqq9" podUID="db26e73a-9c91-47ec-9f36-9577c7e07e87" containerName="registry-server" containerID="cri-o://a655e126a60f82cfded89c7bfac2d47fd45483b6ed2cb369dd344c43fa0cd473" gracePeriod=2 Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.326971 5016 generic.go:334] "Generic (PLEG): container finished" podID="db26e73a-9c91-47ec-9f36-9577c7e07e87" containerID="a655e126a60f82cfded89c7bfac2d47fd45483b6ed2cb369dd344c43fa0cd473" exitCode=0 Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.327792 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgqq9" event={"ID":"db26e73a-9c91-47ec-9f36-9577c7e07e87","Type":"ContainerDied","Data":"a655e126a60f82cfded89c7bfac2d47fd45483b6ed2cb369dd344c43fa0cd473"} Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.327868 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qgqq9" event={"ID":"db26e73a-9c91-47ec-9f36-9577c7e07e87","Type":"ContainerDied","Data":"690f7d54dd1b312124a93178a326a0ee32c46beffef549b71a4a06cd0ac40e22"} Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.327890 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="690f7d54dd1b312124a93178a326a0ee32c46beffef549b71a4a06cd0ac40e22" Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.403143 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.525247 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db26e73a-9c91-47ec-9f36-9577c7e07e87-catalog-content\") pod \"db26e73a-9c91-47ec-9f36-9577c7e07e87\" (UID: \"db26e73a-9c91-47ec-9f36-9577c7e07e87\") " Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.525394 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-88zb8\" (UniqueName: \"kubernetes.io/projected/db26e73a-9c91-47ec-9f36-9577c7e07e87-kube-api-access-88zb8\") pod \"db26e73a-9c91-47ec-9f36-9577c7e07e87\" (UID: \"db26e73a-9c91-47ec-9f36-9577c7e07e87\") " Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.525525 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db26e73a-9c91-47ec-9f36-9577c7e07e87-utilities\") pod \"db26e73a-9c91-47ec-9f36-9577c7e07e87\" (UID: \"db26e73a-9c91-47ec-9f36-9577c7e07e87\") " Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.526378 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db26e73a-9c91-47ec-9f36-9577c7e07e87-utilities" (OuterVolumeSpecName: "utilities") pod "db26e73a-9c91-47ec-9f36-9577c7e07e87" (UID: "db26e73a-9c91-47ec-9f36-9577c7e07e87"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.548082 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db26e73a-9c91-47ec-9f36-9577c7e07e87-kube-api-access-88zb8" (OuterVolumeSpecName: "kube-api-access-88zb8") pod "db26e73a-9c91-47ec-9f36-9577c7e07e87" (UID: "db26e73a-9c91-47ec-9f36-9577c7e07e87"). InnerVolumeSpecName "kube-api-access-88zb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.588073 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db26e73a-9c91-47ec-9f36-9577c7e07e87-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "db26e73a-9c91-47ec-9f36-9577c7e07e87" (UID: "db26e73a-9c91-47ec-9f36-9577c7e07e87"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.628894 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db26e73a-9c91-47ec-9f36-9577c7e07e87-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.628953 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-88zb8\" (UniqueName: \"kubernetes.io/projected/db26e73a-9c91-47ec-9f36-9577c7e07e87-kube-api-access-88zb8\") on node \"crc\" DevicePath \"\"" Dec 11 11:22:32 crc kubenswrapper[5016]: I1211 11:22:32.628972 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db26e73a-9c91-47ec-9f36-9577c7e07e87-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:22:33 crc kubenswrapper[5016]: I1211 11:22:33.335904 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qgqq9" Dec 11 11:22:33 crc kubenswrapper[5016]: I1211 11:22:33.384525 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qgqq9"] Dec 11 11:22:33 crc kubenswrapper[5016]: I1211 11:22:33.393804 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qgqq9"] Dec 11 11:22:33 crc kubenswrapper[5016]: I1211 11:22:33.492833 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db26e73a-9c91-47ec-9f36-9577c7e07e87" path="/var/lib/kubelet/pods/db26e73a-9c91-47ec-9f36-9577c7e07e87/volumes" Dec 11 11:22:42 crc kubenswrapper[5016]: I1211 11:22:42.933387 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:22:42 crc kubenswrapper[5016]: I1211 11:22:42.934252 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:23:12 crc kubenswrapper[5016]: I1211 11:23:12.348083 5016 scope.go:117] "RemoveContainer" containerID="938adc5c0af855b7ba43c5137daea5c88bafd1d05623efd0347c206cc55a9a8b" Dec 11 11:23:12 crc kubenswrapper[5016]: I1211 11:23:12.373011 5016 scope.go:117] "RemoveContainer" containerID="a655e126a60f82cfded89c7bfac2d47fd45483b6ed2cb369dd344c43fa0cd473" Dec 11 11:23:12 crc kubenswrapper[5016]: I1211 11:23:12.418594 5016 scope.go:117] "RemoveContainer" containerID="0d856921d50eeaf1a281d0c638852cb75e106dc5456a8ed17467012eb47dc16f" Dec 11 11:23:12 crc kubenswrapper[5016]: I1211 11:23:12.933010 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:23:12 crc kubenswrapper[5016]: I1211 11:23:12.933081 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:23:32 crc kubenswrapper[5016]: I1211 11:23:32.869514 5016 generic.go:334] "Generic (PLEG): container finished" podID="604b9ba2-ab41-4901-a9ef-9eb82bee5e4a" containerID="7cf644e4f479eeeff0967e3c50a5a64cf66000b4ec05600db242494108620b71" exitCode=0 Dec 11 11:23:32 crc kubenswrapper[5016]: I1211 11:23:32.869608 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" event={"ID":"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a","Type":"ContainerDied","Data":"7cf644e4f479eeeff0967e3c50a5a64cf66000b4ec05600db242494108620b71"} Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.290670 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.377473 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-telemetry-combined-ca-bundle\") pod \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.377547 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-2\") pod \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.377637 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ssh-key\") pod \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.377684 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-inventory\") pod \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.377780 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-1\") pod \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.377813 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-65zmg\" (UniqueName: \"kubernetes.io/projected/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-kube-api-access-65zmg\") pod \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.377881 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-0\") pod \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\" (UID: \"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a\") " Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.383460 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a" (UID: "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.388229 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-kube-api-access-65zmg" (OuterVolumeSpecName: "kube-api-access-65zmg") pod "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a" (UID: "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a"). 
InnerVolumeSpecName "kube-api-access-65zmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.407908 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-inventory" (OuterVolumeSpecName: "inventory") pod "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a" (UID: "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.409606 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a" (UID: "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.410150 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a" (UID: "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.413302 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a" (UID: "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.413715 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a" (UID: "604b9ba2-ab41-4901-a9ef-9eb82bee5e4a"). InnerVolumeSpecName "ceilometer-compute-config-data-2". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.479654 5016 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.479956 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.480077 5016 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-inventory\") on node \"crc\" DevicePath \"\"" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.480174 5016 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.480268 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-65zmg\" (UniqueName: \"kubernetes.io/projected/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-kube-api-access-65zmg\") on node \"crc\" DevicePath \"\"" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.480351 5016 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.480419 5016 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/604b9ba2-ab41-4901-a9ef-9eb82bee5e4a-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.890550 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" event={"ID":"604b9ba2-ab41-4901-a9ef-9eb82bee5e4a","Type":"ContainerDied","Data":"345daf61efd5d06cd820414199b09b1a1798cbecda3680093394196eb4607af7"} Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.890596 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="345daf61efd5d06cd820414199b09b1a1798cbecda3680093394196eb4607af7" Dec 11 11:23:34 crc kubenswrapper[5016]: I1211 11:23:34.891099 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr" Dec 11 11:23:42 crc kubenswrapper[5016]: I1211 11:23:42.932628 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:23:42 crc kubenswrapper[5016]: I1211 11:23:42.933181 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:23:42 crc kubenswrapper[5016]: I1211 11:23:42.933245 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 11:23:42 crc kubenswrapper[5016]: I1211 11:23:42.934120 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4b2dcd166d06f6950e9f92f680a9105092928ed7c54642b432cb05388ec02325"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 11:23:42 crc kubenswrapper[5016]: I1211 11:23:42.934181 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://4b2dcd166d06f6950e9f92f680a9105092928ed7c54642b432cb05388ec02325" gracePeriod=600 Dec 11 11:23:43 crc kubenswrapper[5016]: I1211 11:23:43.993091 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="4b2dcd166d06f6950e9f92f680a9105092928ed7c54642b432cb05388ec02325" exitCode=0 Dec 11 11:23:43 crc kubenswrapper[5016]: I1211 11:23:43.993626 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"4b2dcd166d06f6950e9f92f680a9105092928ed7c54642b432cb05388ec02325"} Dec 11 11:23:43 crc kubenswrapper[5016]: I1211 11:23:43.993658 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"} Dec 11 11:23:43 crc kubenswrapper[5016]: I1211 11:23:43.993677 5016 scope.go:117] "RemoveContainer" containerID="d10d0d866afdee87d9826c73db3f989dbadae27fbdb5e7965ea27a6b6d46257d" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.924363 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Dec 11 11:24:19 crc kubenswrapper[5016]: E1211 11:24:19.925667 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db26e73a-9c91-47ec-9f36-9577c7e07e87" containerName="registry-server" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.925685 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="db26e73a-9c91-47ec-9f36-9577c7e07e87" containerName="registry-server" Dec 11 11:24:19 crc 
kubenswrapper[5016]: E1211 11:24:19.925699 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db26e73a-9c91-47ec-9f36-9577c7e07e87" containerName="extract-utilities" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.925705 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="db26e73a-9c91-47ec-9f36-9577c7e07e87" containerName="extract-utilities" Dec 11 11:24:19 crc kubenswrapper[5016]: E1211 11:24:19.925735 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="604b9ba2-ab41-4901-a9ef-9eb82bee5e4a" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.925743 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="604b9ba2-ab41-4901-a9ef-9eb82bee5e4a" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 11 11:24:19 crc kubenswrapper[5016]: E1211 11:24:19.925751 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db26e73a-9c91-47ec-9f36-9577c7e07e87" containerName="extract-content" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.925757 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="db26e73a-9c91-47ec-9f36-9577c7e07e87" containerName="extract-content" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.925983 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="db26e73a-9c91-47ec-9f36-9577c7e07e87" containerName="registry-server" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.926200 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="604b9ba2-ab41-4901-a9ef-9eb82bee5e4a" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.926851 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.936612 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.936761 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-dwb7v" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.936893 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.936964 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Dec 11 11:24:19 crc kubenswrapper[5016]: I1211 11:24:19.947888 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.065225 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d9613e90-5366-4f68-80dd-f66a7541a670-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.065568 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/d9613e90-5366-4f68-80dd-f66a7541a670-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.065596 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.065634 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.065677 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.065700 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7h7g\" (UniqueName: \"kubernetes.io/projected/d9613e90-5366-4f68-80dd-f66a7541a670-kube-api-access-b7h7g\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.065734 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/d9613e90-5366-4f68-80dd-f66a7541a670-config-data\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.065756 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.065808 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/d9613e90-5366-4f68-80dd-f66a7541a670-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.167957 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.168018 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/d9613e90-5366-4f68-80dd-f66a7541a670-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.168121 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d9613e90-5366-4f68-80dd-f66a7541a670-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.168189 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/d9613e90-5366-4f68-80dd-f66a7541a670-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.168220 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.168266 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.168306 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-ssh-key\") pod 
\"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.168618 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.169092 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7h7g\" (UniqueName: \"kubernetes.io/projected/d9613e90-5366-4f68-80dd-f66a7541a670-kube-api-access-b7h7g\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.169141 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/d9613e90-5366-4f68-80dd-f66a7541a670-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.169228 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d9613e90-5366-4f68-80dd-f66a7541a670-config-data\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.169576 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/d9613e90-5366-4f68-80dd-f66a7541a670-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.169667 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d9613e90-5366-4f68-80dd-f66a7541a670-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.170351 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d9613e90-5366-4f68-80dd-f66a7541a670-config-data\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.174966 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.176497 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc 
kubenswrapper[5016]: I1211 11:24:20.176794 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.191602 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7h7g\" (UniqueName: \"kubernetes.io/projected/d9613e90-5366-4f68-80dd-f66a7541a670-kube-api-access-b7h7g\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.197763 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"tempest-tests-tempest\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.259365 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.699306 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Dec 11 11:24:20 crc kubenswrapper[5016]: I1211 11:24:20.722094 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 11:24:21 crc kubenswrapper[5016]: I1211 11:24:21.375697 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"d9613e90-5366-4f68-80dd-f66a7541a670","Type":"ContainerStarted","Data":"45bccf0c4071558dbe16f6fcf48ed1b06d5ed54d34d26a8c681d7cb8b4ce73f5"} Dec 11 11:24:55 crc kubenswrapper[5016]: E1211 11:24:55.185478 5016 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Dec 11 11:24:55 crc kubenswrapper[5016]: E1211 11:24:55.186157 5016 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b7h7g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(d9613e90-5366-4f68-80dd-f66a7541a670): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 11 11:24:55 crc kubenswrapper[5016]: E1211 11:24:55.187730 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" 
podUID="d9613e90-5366-4f68-80dd-f66a7541a670" Dec 11 11:24:55 crc kubenswrapper[5016]: E1211 11:24:55.738307 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="d9613e90-5366-4f68-80dd-f66a7541a670" Dec 11 11:25:08 crc kubenswrapper[5016]: I1211 11:25:08.316215 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Dec 11 11:25:09 crc kubenswrapper[5016]: I1211 11:25:09.883846 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"d9613e90-5366-4f68-80dd-f66a7541a670","Type":"ContainerStarted","Data":"1fa15149d2490ac1839de31b01d3939635b23b0f98e6ffc16a73f4d0f7cc2726"} Dec 11 11:25:09 crc kubenswrapper[5016]: I1211 11:25:09.919251 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.328532668 podStartE2EDuration="51.919218355s" podCreationTimestamp="2025-12-11 11:24:18 +0000 UTC" firstStartedPulling="2025-12-11 11:24:20.72177935 +0000 UTC m=+2977.540338929" lastFinishedPulling="2025-12-11 11:25:08.312465037 +0000 UTC m=+3025.131024616" observedRunningTime="2025-12-11 11:25:09.906337328 +0000 UTC m=+3026.724896937" watchObservedRunningTime="2025-12-11 11:25:09.919218355 +0000 UTC m=+3026.737777964" Dec 11 11:26:12 crc kubenswrapper[5016]: I1211 11:26:12.932662 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:26:12 crc kubenswrapper[5016]: I1211 11:26:12.934564 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:26:42 crc kubenswrapper[5016]: I1211 11:26:42.932737 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:26:42 crc kubenswrapper[5016]: I1211 11:26:42.934517 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:27:12 crc kubenswrapper[5016]: I1211 11:27:12.933597 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:27:12 crc kubenswrapper[5016]: I1211 11:27:12.934358 5016 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:27:12 crc kubenswrapper[5016]: I1211 11:27:12.934418 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 11:27:12 crc kubenswrapper[5016]: I1211 11:27:12.935388 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 11:27:12 crc kubenswrapper[5016]: I1211 11:27:12.935457 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8" gracePeriod=600 Dec 11 11:27:13 crc kubenswrapper[5016]: E1211 11:27:13.072988 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:27:13 crc kubenswrapper[5016]: I1211 11:27:13.381594 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8" exitCode=0 Dec 11 11:27:13 crc kubenswrapper[5016]: I1211 11:27:13.381650 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"} Dec 11 11:27:13 crc kubenswrapper[5016]: I1211 11:27:13.381712 5016 scope.go:117] "RemoveContainer" containerID="4b2dcd166d06f6950e9f92f680a9105092928ed7c54642b432cb05388ec02325" Dec 11 11:27:13 crc kubenswrapper[5016]: I1211 11:27:13.382476 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8" Dec 11 11:27:13 crc kubenswrapper[5016]: E1211 11:27:13.382860 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:27:24 crc kubenswrapper[5016]: I1211 11:27:24.474590 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8" Dec 11 11:27:24 crc kubenswrapper[5016]: E1211 11:27:24.475522 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:27:28 crc kubenswrapper[5016]: I1211 11:27:28.765462 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-j4n52"] Dec 11 11:27:28 crc kubenswrapper[5016]: I1211 11:27:28.769322 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j4n52" Dec 11 11:27:28 crc kubenswrapper[5016]: I1211 11:27:28.785723 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j4n52"] Dec 11 11:27:28 crc kubenswrapper[5016]: I1211 11:27:28.944705 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7929b72-fdfe-4703-ab76-e99de50be310-catalog-content\") pod \"community-operators-j4n52\" (UID: \"f7929b72-fdfe-4703-ab76-e99de50be310\") " pod="openshift-marketplace/community-operators-j4n52" Dec 11 11:27:28 crc kubenswrapper[5016]: I1211 11:27:28.944782 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7929b72-fdfe-4703-ab76-e99de50be310-utilities\") pod \"community-operators-j4n52\" (UID: \"f7929b72-fdfe-4703-ab76-e99de50be310\") " pod="openshift-marketplace/community-operators-j4n52" Dec 11 11:27:28 crc kubenswrapper[5016]: I1211 11:27:28.944886 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsvv8\" (UniqueName: \"kubernetes.io/projected/f7929b72-fdfe-4703-ab76-e99de50be310-kube-api-access-rsvv8\") pod \"community-operators-j4n52\" (UID: \"f7929b72-fdfe-4703-ab76-e99de50be310\") " pod="openshift-marketplace/community-operators-j4n52" Dec 11 11:27:29 crc kubenswrapper[5016]: I1211 11:27:29.046917 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsvv8\" (UniqueName: \"kubernetes.io/projected/f7929b72-fdfe-4703-ab76-e99de50be310-kube-api-access-rsvv8\") pod \"community-operators-j4n52\" (UID: \"f7929b72-fdfe-4703-ab76-e99de50be310\") " pod="openshift-marketplace/community-operators-j4n52" Dec 11 11:27:29 crc kubenswrapper[5016]: I1211 11:27:29.047127 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7929b72-fdfe-4703-ab76-e99de50be310-catalog-content\") pod \"community-operators-j4n52\" (UID: \"f7929b72-fdfe-4703-ab76-e99de50be310\") " pod="openshift-marketplace/community-operators-j4n52" Dec 11 11:27:29 crc kubenswrapper[5016]: I1211 11:27:29.047160 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7929b72-fdfe-4703-ab76-e99de50be310-utilities\") pod \"community-operators-j4n52\" (UID: \"f7929b72-fdfe-4703-ab76-e99de50be310\") " pod="openshift-marketplace/community-operators-j4n52" Dec 11 11:27:29 crc kubenswrapper[5016]: I1211 11:27:29.047765 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/f7929b72-fdfe-4703-ab76-e99de50be310-utilities\") pod \"community-operators-j4n52\" (UID: \"f7929b72-fdfe-4703-ab76-e99de50be310\") " pod="openshift-marketplace/community-operators-j4n52" Dec 11 11:27:29 crc kubenswrapper[5016]: I1211 11:27:29.047791 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7929b72-fdfe-4703-ab76-e99de50be310-catalog-content\") pod \"community-operators-j4n52\" (UID: \"f7929b72-fdfe-4703-ab76-e99de50be310\") " pod="openshift-marketplace/community-operators-j4n52" Dec 11 11:27:29 crc kubenswrapper[5016]: I1211 11:27:29.071069 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsvv8\" (UniqueName: \"kubernetes.io/projected/f7929b72-fdfe-4703-ab76-e99de50be310-kube-api-access-rsvv8\") pod \"community-operators-j4n52\" (UID: \"f7929b72-fdfe-4703-ab76-e99de50be310\") " pod="openshift-marketplace/community-operators-j4n52" Dec 11 11:27:29 crc kubenswrapper[5016]: I1211 11:27:29.099796 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j4n52" Dec 11 11:27:29 crc kubenswrapper[5016]: I1211 11:27:29.702898 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j4n52"] Dec 11 11:27:30 crc kubenswrapper[5016]: I1211 11:27:30.557785 5016 generic.go:334] "Generic (PLEG): container finished" podID="f7929b72-fdfe-4703-ab76-e99de50be310" containerID="fdb1716b8219a9c55ec974d6f75cf198fce709c941984d3de2f26fdf82801d63" exitCode=0 Dec 11 11:27:30 crc kubenswrapper[5016]: I1211 11:27:30.557908 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j4n52" event={"ID":"f7929b72-fdfe-4703-ab76-e99de50be310","Type":"ContainerDied","Data":"fdb1716b8219a9c55ec974d6f75cf198fce709c941984d3de2f26fdf82801d63"} Dec 11 11:27:30 crc kubenswrapper[5016]: I1211 11:27:30.558120 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j4n52" event={"ID":"f7929b72-fdfe-4703-ab76-e99de50be310","Type":"ContainerStarted","Data":"4bae118867a55bb780021d7e51bbbf355811d45ca294c9bd25abc82f8a5c5d90"} Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.369034 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9s5pf"] Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.371845 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.402924 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9s5pf"]
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.526671 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34dd8907-425f-4bb2-930f-e41636012eae-utilities\") pod \"redhat-operators-9s5pf\" (UID: \"34dd8907-425f-4bb2-930f-e41636012eae\") " pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.526802 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34dd8907-425f-4bb2-930f-e41636012eae-catalog-content\") pod \"redhat-operators-9s5pf\" (UID: \"34dd8907-425f-4bb2-930f-e41636012eae\") " pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.527091 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2l2v\" (UniqueName: \"kubernetes.io/projected/34dd8907-425f-4bb2-930f-e41636012eae-kube-api-access-b2l2v\") pod \"redhat-operators-9s5pf\" (UID: \"34dd8907-425f-4bb2-930f-e41636012eae\") " pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.586233 5016 generic.go:334] "Generic (PLEG): container finished" podID="f7929b72-fdfe-4703-ab76-e99de50be310" containerID="e47db1020b1bc960db2cfb9c30953ab9b08ab14d3c772c42cf3dad4abe334fbf" exitCode=0
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.586282 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j4n52" event={"ID":"f7929b72-fdfe-4703-ab76-e99de50be310","Type":"ContainerDied","Data":"e47db1020b1bc960db2cfb9c30953ab9b08ab14d3c772c42cf3dad4abe334fbf"}
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.630686 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34dd8907-425f-4bb2-930f-e41636012eae-utilities\") pod \"redhat-operators-9s5pf\" (UID: \"34dd8907-425f-4bb2-930f-e41636012eae\") " pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.631115 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34dd8907-425f-4bb2-930f-e41636012eae-catalog-content\") pod \"redhat-operators-9s5pf\" (UID: \"34dd8907-425f-4bb2-930f-e41636012eae\") " pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.631614 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2l2v\" (UniqueName: \"kubernetes.io/projected/34dd8907-425f-4bb2-930f-e41636012eae-kube-api-access-b2l2v\") pod \"redhat-operators-9s5pf\" (UID: \"34dd8907-425f-4bb2-930f-e41636012eae\") " pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.631508 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34dd8907-425f-4bb2-930f-e41636012eae-catalog-content\") pod \"redhat-operators-9s5pf\" (UID: \"34dd8907-425f-4bb2-930f-e41636012eae\") " pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.631260 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34dd8907-425f-4bb2-930f-e41636012eae-utilities\") pod \"redhat-operators-9s5pf\" (UID: \"34dd8907-425f-4bb2-930f-e41636012eae\") " pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.661365 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2l2v\" (UniqueName: \"kubernetes.io/projected/34dd8907-425f-4bb2-930f-e41636012eae-kube-api-access-b2l2v\") pod \"redhat-operators-9s5pf\" (UID: \"34dd8907-425f-4bb2-930f-e41636012eae\") " pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:32 crc kubenswrapper[5016]: I1211 11:27:32.758667 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:33 crc kubenswrapper[5016]: I1211 11:27:33.367628 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9s5pf"]
Dec 11 11:27:33 crc kubenswrapper[5016]: W1211 11:27:33.383377 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34dd8907_425f_4bb2_930f_e41636012eae.slice/crio-48076258a032539bd92ba17778c2091f9a0baf8e0c96e3515137d8948524f366 WatchSource:0}: Error finding container 48076258a032539bd92ba17778c2091f9a0baf8e0c96e3515137d8948524f366: Status 404 returned error can't find the container with id 48076258a032539bd92ba17778c2091f9a0baf8e0c96e3515137d8948524f366
Dec 11 11:27:33 crc kubenswrapper[5016]: I1211 11:27:33.607650 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s5pf" event={"ID":"34dd8907-425f-4bb2-930f-e41636012eae","Type":"ContainerStarted","Data":"48076258a032539bd92ba17778c2091f9a0baf8e0c96e3515137d8948524f366"}
Dec 11 11:27:34 crc kubenswrapper[5016]: I1211 11:27:34.622592 5016 generic.go:334] "Generic (PLEG): container finished" podID="34dd8907-425f-4bb2-930f-e41636012eae" containerID="c2ab3606e5d0be9cd6a672ca1079d3e930ab8253b66d33dd6e0946c7da3c7532" exitCode=0
Dec 11 11:27:34 crc kubenswrapper[5016]: I1211 11:27:34.622717 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s5pf" event={"ID":"34dd8907-425f-4bb2-930f-e41636012eae","Type":"ContainerDied","Data":"c2ab3606e5d0be9cd6a672ca1079d3e930ab8253b66d33dd6e0946c7da3c7532"}
Dec 11 11:27:34 crc kubenswrapper[5016]: I1211 11:27:34.630275 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j4n52" event={"ID":"f7929b72-fdfe-4703-ab76-e99de50be310","Type":"ContainerStarted","Data":"a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868"}
Dec 11 11:27:35 crc kubenswrapper[5016]: I1211 11:27:35.641932 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s5pf" event={"ID":"34dd8907-425f-4bb2-930f-e41636012eae","Type":"ContainerStarted","Data":"1a43399c94cdbbd21bebc4cb9c40381c4f9016c489e653834cf22ffad92a57bf"}
Dec 11 11:27:35 crc kubenswrapper[5016]: I1211 11:27:35.674830 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-j4n52" podStartSLOduration=4.35914038 podStartE2EDuration="7.674807915s" podCreationTimestamp="2025-12-11 11:27:28 +0000 UTC" firstStartedPulling="2025-12-11 11:27:30.560193992 +0000 UTC m=+3167.378753571" lastFinishedPulling="2025-12-11 11:27:33.875861527 +0000 UTC m=+3170.694421106" observedRunningTime="2025-12-11 11:27:34.65925111 +0000 UTC m=+3171.477810699" watchObservedRunningTime="2025-12-11 11:27:35.674807915 +0000 UTC m=+3172.493367494"
Dec 11 11:27:37 crc kubenswrapper[5016]: I1211 11:27:37.475250 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:27:37 crc kubenswrapper[5016]: E1211 11:27:37.475993 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:27:37 crc kubenswrapper[5016]: I1211 11:27:37.665985 5016 generic.go:334] "Generic (PLEG): container finished" podID="34dd8907-425f-4bb2-930f-e41636012eae" containerID="1a43399c94cdbbd21bebc4cb9c40381c4f9016c489e653834cf22ffad92a57bf" exitCode=0
Dec 11 11:27:37 crc kubenswrapper[5016]: I1211 11:27:37.666051 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s5pf" event={"ID":"34dd8907-425f-4bb2-930f-e41636012eae","Type":"ContainerDied","Data":"1a43399c94cdbbd21bebc4cb9c40381c4f9016c489e653834cf22ffad92a57bf"}
Dec 11 11:27:38 crc kubenswrapper[5016]: I1211 11:27:38.681982 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s5pf" event={"ID":"34dd8907-425f-4bb2-930f-e41636012eae","Type":"ContainerStarted","Data":"f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4"}
Dec 11 11:27:38 crc kubenswrapper[5016]: I1211 11:27:38.702793 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9s5pf" podStartSLOduration=3.171941453 podStartE2EDuration="6.702769693s" podCreationTimestamp="2025-12-11 11:27:32 +0000 UTC" firstStartedPulling="2025-12-11 11:27:34.625637934 +0000 UTC m=+3171.444197513" lastFinishedPulling="2025-12-11 11:27:38.156466174 +0000 UTC m=+3174.975025753" observedRunningTime="2025-12-11 11:27:38.701908071 +0000 UTC m=+3175.520467670" watchObservedRunningTime="2025-12-11 11:27:38.702769693 +0000 UTC m=+3175.521329282"
Dec 11 11:27:39 crc kubenswrapper[5016]: I1211 11:27:39.100841 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-j4n52"
Dec 11 11:27:39 crc kubenswrapper[5016]: I1211 11:27:39.101477 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-j4n52"
Dec 11 11:27:39 crc kubenswrapper[5016]: I1211 11:27:39.152584 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-j4n52"
Dec 11 11:27:39 crc kubenswrapper[5016]: I1211 11:27:39.736733 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-j4n52"
Dec 11 11:27:41 crc kubenswrapper[5016]: I1211 11:27:41.358315 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j4n52"]
Dec 11 11:27:42 crc kubenswrapper[5016]: I1211 11:27:42.725548 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-j4n52" podUID="f7929b72-fdfe-4703-ab76-e99de50be310" containerName="registry-server" containerID="cri-o://a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868" gracePeriod=2
Dec 11 11:27:42 crc kubenswrapper[5016]: I1211 11:27:42.759231 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:42 crc kubenswrapper[5016]: I1211 11:27:42.759329 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.215094 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j4n52"
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.370362 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7929b72-fdfe-4703-ab76-e99de50be310-utilities\") pod \"f7929b72-fdfe-4703-ab76-e99de50be310\" (UID: \"f7929b72-fdfe-4703-ab76-e99de50be310\") "
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.370697 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsvv8\" (UniqueName: \"kubernetes.io/projected/f7929b72-fdfe-4703-ab76-e99de50be310-kube-api-access-rsvv8\") pod \"f7929b72-fdfe-4703-ab76-e99de50be310\" (UID: \"f7929b72-fdfe-4703-ab76-e99de50be310\") "
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.370922 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7929b72-fdfe-4703-ab76-e99de50be310-catalog-content\") pod \"f7929b72-fdfe-4703-ab76-e99de50be310\" (UID: \"f7929b72-fdfe-4703-ab76-e99de50be310\") "
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.371014 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7929b72-fdfe-4703-ab76-e99de50be310-utilities" (OuterVolumeSpecName: "utilities") pod "f7929b72-fdfe-4703-ab76-e99de50be310" (UID: "f7929b72-fdfe-4703-ab76-e99de50be310"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.371762 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7929b72-fdfe-4703-ab76-e99de50be310-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.378147 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7929b72-fdfe-4703-ab76-e99de50be310-kube-api-access-rsvv8" (OuterVolumeSpecName: "kube-api-access-rsvv8") pod "f7929b72-fdfe-4703-ab76-e99de50be310" (UID: "f7929b72-fdfe-4703-ab76-e99de50be310"). InnerVolumeSpecName "kube-api-access-rsvv8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.424878 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7929b72-fdfe-4703-ab76-e99de50be310-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7929b72-fdfe-4703-ab76-e99de50be310" (UID: "f7929b72-fdfe-4703-ab76-e99de50be310"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.475395 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsvv8\" (UniqueName: \"kubernetes.io/projected/f7929b72-fdfe-4703-ab76-e99de50be310-kube-api-access-rsvv8\") on node \"crc\" DevicePath \"\""
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.475436 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7929b72-fdfe-4703-ab76-e99de50be310-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.736009 5016 generic.go:334] "Generic (PLEG): container finished" podID="f7929b72-fdfe-4703-ab76-e99de50be310" containerID="a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868" exitCode=0
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.736073 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j4n52" event={"ID":"f7929b72-fdfe-4703-ab76-e99de50be310","Type":"ContainerDied","Data":"a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868"}
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.736103 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j4n52" event={"ID":"f7929b72-fdfe-4703-ab76-e99de50be310","Type":"ContainerDied","Data":"4bae118867a55bb780021d7e51bbbf355811d45ca294c9bd25abc82f8a5c5d90"}
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.736121 5016 scope.go:117] "RemoveContainer" containerID="a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868"
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.736260 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j4n52"
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.760622 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j4n52"]
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.766262 5016 scope.go:117] "RemoveContainer" containerID="e47db1020b1bc960db2cfb9c30953ab9b08ab14d3c772c42cf3dad4abe334fbf"
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.770363 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-j4n52"]
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.790408 5016 scope.go:117] "RemoveContainer" containerID="fdb1716b8219a9c55ec974d6f75cf198fce709c941984d3de2f26fdf82801d63"
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.803838 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9s5pf" podUID="34dd8907-425f-4bb2-930f-e41636012eae" containerName="registry-server" probeResult="failure" output=<
Dec 11 11:27:43 crc kubenswrapper[5016]: timeout: failed to connect service ":50051" within 1s
Dec 11 11:27:43 crc kubenswrapper[5016]: >
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.848155 5016 scope.go:117] "RemoveContainer" containerID="a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868"
Dec 11 11:27:43 crc kubenswrapper[5016]: E1211 11:27:43.848635 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868\": container with ID starting with a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868 not found: ID does not exist" containerID="a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868"
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.848773 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868"} err="failed to get container status \"a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868\": rpc error: code = NotFound desc = could not find container \"a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868\": container with ID starting with a4488e80f9f4448930bb99f609710caae3e8d7aa5a6ff94d3e74d9972da1f868 not found: ID does not exist"
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.848894 5016 scope.go:117] "RemoveContainer" containerID="e47db1020b1bc960db2cfb9c30953ab9b08ab14d3c772c42cf3dad4abe334fbf"
Dec 11 11:27:43 crc kubenswrapper[5016]: E1211 11:27:43.849409 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e47db1020b1bc960db2cfb9c30953ab9b08ab14d3c772c42cf3dad4abe334fbf\": container with ID starting with e47db1020b1bc960db2cfb9c30953ab9b08ab14d3c772c42cf3dad4abe334fbf not found: ID does not exist" containerID="e47db1020b1bc960db2cfb9c30953ab9b08ab14d3c772c42cf3dad4abe334fbf"
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.849437 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e47db1020b1bc960db2cfb9c30953ab9b08ab14d3c772c42cf3dad4abe334fbf"} err="failed to get container status \"e47db1020b1bc960db2cfb9c30953ab9b08ab14d3c772c42cf3dad4abe334fbf\": rpc error: code = NotFound desc = could not find container \"e47db1020b1bc960db2cfb9c30953ab9b08ab14d3c772c42cf3dad4abe334fbf\": container with ID starting with e47db1020b1bc960db2cfb9c30953ab9b08ab14d3c772c42cf3dad4abe334fbf not found: ID does not exist"
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.849454 5016 scope.go:117] "RemoveContainer" containerID="fdb1716b8219a9c55ec974d6f75cf198fce709c941984d3de2f26fdf82801d63"
Dec 11 11:27:43 crc kubenswrapper[5016]: E1211 11:27:43.849741 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdb1716b8219a9c55ec974d6f75cf198fce709c941984d3de2f26fdf82801d63\": container with ID starting with fdb1716b8219a9c55ec974d6f75cf198fce709c941984d3de2f26fdf82801d63 not found: ID does not exist" containerID="fdb1716b8219a9c55ec974d6f75cf198fce709c941984d3de2f26fdf82801d63"
Dec 11 11:27:43 crc kubenswrapper[5016]: I1211 11:27:43.849783 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdb1716b8219a9c55ec974d6f75cf198fce709c941984d3de2f26fdf82801d63"} err="failed to get container status \"fdb1716b8219a9c55ec974d6f75cf198fce709c941984d3de2f26fdf82801d63\": rpc error: code = NotFound desc = could not find container \"fdb1716b8219a9c55ec974d6f75cf198fce709c941984d3de2f26fdf82801d63\": container with ID starting with fdb1716b8219a9c55ec974d6f75cf198fce709c941984d3de2f26fdf82801d63 not found: ID does not exist"
Dec 11 11:27:45 crc kubenswrapper[5016]: E1211 11:27:45.301659 5016 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7929b72_fdfe_4703_ab76_e99de50be310.slice/crio-4bae118867a55bb780021d7e51bbbf355811d45ca294c9bd25abc82f8a5c5d90\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7929b72_fdfe_4703_ab76_e99de50be310.slice\": RecentStats: unable to find data in memory cache]"
Dec 11 11:27:45 crc kubenswrapper[5016]: I1211 11:27:45.485575 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7929b72-fdfe-4703-ab76-e99de50be310" path="/var/lib/kubelet/pods/f7929b72-fdfe-4703-ab76-e99de50be310/volumes"
Dec 11 11:27:50 crc kubenswrapper[5016]: I1211 11:27:50.478236 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:27:50 crc kubenswrapper[5016]: E1211 11:27:50.479240 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:27:52 crc kubenswrapper[5016]: I1211 11:27:52.819445 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:52 crc kubenswrapper[5016]: I1211 11:27:52.900176 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:53 crc kubenswrapper[5016]: I1211 11:27:53.060492 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9s5pf"]
Dec 11 11:27:54 crc kubenswrapper[5016]: I1211 11:27:54.840900 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9s5pf" podUID="34dd8907-425f-4bb2-930f-e41636012eae" containerName="registry-server" containerID="cri-o://f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4" gracePeriod=2
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.368875 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.520548 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34dd8907-425f-4bb2-930f-e41636012eae-utilities\") pod \"34dd8907-425f-4bb2-930f-e41636012eae\" (UID: \"34dd8907-425f-4bb2-930f-e41636012eae\") "
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.520688 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34dd8907-425f-4bb2-930f-e41636012eae-catalog-content\") pod \"34dd8907-425f-4bb2-930f-e41636012eae\" (UID: \"34dd8907-425f-4bb2-930f-e41636012eae\") "
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.520820 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2l2v\" (UniqueName: \"kubernetes.io/projected/34dd8907-425f-4bb2-930f-e41636012eae-kube-api-access-b2l2v\") pod \"34dd8907-425f-4bb2-930f-e41636012eae\" (UID: \"34dd8907-425f-4bb2-930f-e41636012eae\") "
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.521220 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34dd8907-425f-4bb2-930f-e41636012eae-utilities" (OuterVolumeSpecName: "utilities") pod "34dd8907-425f-4bb2-930f-e41636012eae" (UID: "34dd8907-425f-4bb2-930f-e41636012eae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.526766 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34dd8907-425f-4bb2-930f-e41636012eae-kube-api-access-b2l2v" (OuterVolumeSpecName: "kube-api-access-b2l2v") pod "34dd8907-425f-4bb2-930f-e41636012eae" (UID: "34dd8907-425f-4bb2-930f-e41636012eae"). InnerVolumeSpecName "kube-api-access-b2l2v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:27:55 crc kubenswrapper[5016]: E1211 11:27:55.586794 5016 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7929b72_fdfe_4703_ab76_e99de50be310.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7929b72_fdfe_4703_ab76_e99de50be310.slice/crio-4bae118867a55bb780021d7e51bbbf355811d45ca294c9bd25abc82f8a5c5d90\": RecentStats: unable to find data in memory cache]"
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.623463 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2l2v\" (UniqueName: \"kubernetes.io/projected/34dd8907-425f-4bb2-930f-e41636012eae-kube-api-access-b2l2v\") on node \"crc\" DevicePath \"\""
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.623501 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/34dd8907-425f-4bb2-930f-e41636012eae-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.654934 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34dd8907-425f-4bb2-930f-e41636012eae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "34dd8907-425f-4bb2-930f-e41636012eae" (UID: "34dd8907-425f-4bb2-930f-e41636012eae"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.725856 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/34dd8907-425f-4bb2-930f-e41636012eae-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.855701 5016 generic.go:334] "Generic (PLEG): container finished" podID="34dd8907-425f-4bb2-930f-e41636012eae" containerID="f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4" exitCode=0
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.855757 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9s5pf"
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.855808 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s5pf" event={"ID":"34dd8907-425f-4bb2-930f-e41636012eae","Type":"ContainerDied","Data":"f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4"}
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.856148 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s5pf" event={"ID":"34dd8907-425f-4bb2-930f-e41636012eae","Type":"ContainerDied","Data":"48076258a032539bd92ba17778c2091f9a0baf8e0c96e3515137d8948524f366"}
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.856179 5016 scope.go:117] "RemoveContainer" containerID="f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4"
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.880290 5016 scope.go:117] "RemoveContainer" containerID="1a43399c94cdbbd21bebc4cb9c40381c4f9016c489e653834cf22ffad92a57bf"
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.896280 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9s5pf"]
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.905756 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9s5pf"]
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.921322 5016 scope.go:117] "RemoveContainer" containerID="c2ab3606e5d0be9cd6a672ca1079d3e930ab8253b66d33dd6e0946c7da3c7532"
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.945387 5016 scope.go:117] "RemoveContainer" containerID="f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4"
Dec 11 11:27:55 crc kubenswrapper[5016]: E1211 11:27:55.945952 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4\": container with ID starting with f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4 not found: ID does not exist" containerID="f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4"
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.945993 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4"} err="failed to get container status \"f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4\": rpc error: code = NotFound desc = could not find container \"f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4\": container with ID starting with f1312bd5e256522fb8269086400840efaefa4abe2ed8389e6d71a91cd49d72e4 not found: ID does not exist"
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.946022 5016 scope.go:117] "RemoveContainer" containerID="1a43399c94cdbbd21bebc4cb9c40381c4f9016c489e653834cf22ffad92a57bf"
Dec 11 11:27:55 crc kubenswrapper[5016]: E1211 11:27:55.946385 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a43399c94cdbbd21bebc4cb9c40381c4f9016c489e653834cf22ffad92a57bf\": container with ID starting with 1a43399c94cdbbd21bebc4cb9c40381c4f9016c489e653834cf22ffad92a57bf not found: ID does not exist" containerID="1a43399c94cdbbd21bebc4cb9c40381c4f9016c489e653834cf22ffad92a57bf"
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.946430 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a43399c94cdbbd21bebc4cb9c40381c4f9016c489e653834cf22ffad92a57bf"} err="failed to get container status \"1a43399c94cdbbd21bebc4cb9c40381c4f9016c489e653834cf22ffad92a57bf\": rpc error: code = NotFound desc = could not find container \"1a43399c94cdbbd21bebc4cb9c40381c4f9016c489e653834cf22ffad92a57bf\": container with ID starting with 1a43399c94cdbbd21bebc4cb9c40381c4f9016c489e653834cf22ffad92a57bf not found: ID does not exist"
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.946463 5016 scope.go:117] "RemoveContainer" containerID="c2ab3606e5d0be9cd6a672ca1079d3e930ab8253b66d33dd6e0946c7da3c7532"
Dec 11 11:27:55 crc kubenswrapper[5016]: E1211 11:27:55.946797 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2ab3606e5d0be9cd6a672ca1079d3e930ab8253b66d33dd6e0946c7da3c7532\": container with ID starting with c2ab3606e5d0be9cd6a672ca1079d3e930ab8253b66d33dd6e0946c7da3c7532 not found: ID does not exist" containerID="c2ab3606e5d0be9cd6a672ca1079d3e930ab8253b66d33dd6e0946c7da3c7532"
Dec 11 11:27:55 crc kubenswrapper[5016]: I1211 11:27:55.946826 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2ab3606e5d0be9cd6a672ca1079d3e930ab8253b66d33dd6e0946c7da3c7532"} err="failed to get container status \"c2ab3606e5d0be9cd6a672ca1079d3e930ab8253b66d33dd6e0946c7da3c7532\": rpc error: code = NotFound desc = could not find container \"c2ab3606e5d0be9cd6a672ca1079d3e930ab8253b66d33dd6e0946c7da3c7532\": container with ID starting with c2ab3606e5d0be9cd6a672ca1079d3e930ab8253b66d33dd6e0946c7da3c7532 not found: ID does not exist"
Dec 11 11:27:57 crc kubenswrapper[5016]: I1211 11:27:57.486482 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34dd8907-425f-4bb2-930f-e41636012eae" path="/var/lib/kubelet/pods/34dd8907-425f-4bb2-930f-e41636012eae/volumes"
Dec 11 11:28:04 crc kubenswrapper[5016]: I1211 11:28:04.475069 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:28:04 crc kubenswrapper[5016]: E1211 11:28:04.475871 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:28:05 crc kubenswrapper[5016]: E1211 11:28:05.823238 5016 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7929b72_fdfe_4703_ab76_e99de50be310.slice/crio-4bae118867a55bb780021d7e51bbbf355811d45ca294c9bd25abc82f8a5c5d90\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7929b72_fdfe_4703_ab76_e99de50be310.slice\": RecentStats: unable to find data in memory cache]"
Dec 11 11:28:15 crc kubenswrapper[5016]: I1211 11:28:15.474914 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:28:15 crc kubenswrapper[5016]: E1211 11:28:15.475720 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:28:16 crc kubenswrapper[5016]: E1211 11:28:16.066703 5016 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7929b72_fdfe_4703_ab76_e99de50be310.slice/crio-4bae118867a55bb780021d7e51bbbf355811d45ca294c9bd25abc82f8a5c5d90\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7929b72_fdfe_4703_ab76_e99de50be310.slice\": RecentStats: unable to find data in memory cache]"
Dec 11 11:28:26 crc kubenswrapper[5016]: E1211 11:28:26.316459 5016 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7929b72_fdfe_4703_ab76_e99de50be310.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7929b72_fdfe_4703_ab76_e99de50be310.slice/crio-4bae118867a55bb780021d7e51bbbf355811d45ca294c9bd25abc82f8a5c5d90\": RecentStats: unable to find data in memory cache]"
Dec 11 11:28:30 crc kubenswrapper[5016]: I1211 11:28:30.476060 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:28:30 crc kubenswrapper[5016]: E1211 11:28:30.477564 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:28:36 crc kubenswrapper[5016]: E1211 11:28:36.571363 5016 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7929b72_fdfe_4703_ab76_e99de50be310.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7929b72_fdfe_4703_ab76_e99de50be310.slice/crio-4bae118867a55bb780021d7e51bbbf355811d45ca294c9bd25abc82f8a5c5d90\": RecentStats: unable to find data in memory cache]"
Dec 11 11:28:43 crc kubenswrapper[5016]: E1211 11:28:43.512993 5016 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/8cd9e68f66067b74fea5938c1c8345ddf7fe4dfdcca850c7dcc2a692fb105a49/diff" to get inode usage: stat /var/lib/containers/storage/overlay/8cd9e68f66067b74fea5938c1c8345ddf7fe4dfdcca850c7dcc2a692fb105a49/diff: no such file or directory, extraDiskErr:
Dec 11 11:28:44 crc kubenswrapper[5016]: I1211 11:28:44.474647 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:28:44 crc kubenswrapper[5016]: E1211 11:28:44.475182 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:28:58 crc kubenswrapper[5016]: I1211 11:28:58.474387 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:28:58 crc kubenswrapper[5016]: E1211 11:28:58.475415 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:29:13 crc kubenswrapper[5016]: I1211 11:29:13.481995 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:29:13 crc kubenswrapper[5016]: E1211 11:29:13.482720 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:29:25 crc kubenswrapper[5016]: I1211 11:29:25.474874 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:29:25 crc kubenswrapper[5016]: E1211 11:29:25.475648 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:29:38 crc kubenswrapper[5016]: I1211 11:29:38.475010 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:29:38 crc kubenswrapper[5016]: E1211 11:29:38.475918 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:29:53 crc kubenswrapper[5016]: I1211 11:29:53.481296 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:29:53 crc kubenswrapper[5016]: E1211 11:29:53.482085 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.156124 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"]
Dec 11 11:30:00 crc kubenswrapper[5016]: E1211 11:30:00.157143 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34dd8907-425f-4bb2-930f-e41636012eae" containerName="extract-content"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.157162 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="34dd8907-425f-4bb2-930f-e41636012eae" containerName="extract-content"
Dec 11 11:30:00 crc kubenswrapper[5016]: E1211 11:30:00.157179 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7929b72-fdfe-4703-ab76-e99de50be310" containerName="extract-content"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.157189 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7929b72-fdfe-4703-ab76-e99de50be310" containerName="extract-content"
Dec 11 11:30:00 crc kubenswrapper[5016]: E1211 11:30:00.157206 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34dd8907-425f-4bb2-930f-e41636012eae" containerName="extract-utilities"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.157214 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="34dd8907-425f-4bb2-930f-e41636012eae" containerName="extract-utilities"
Dec 11 11:30:00 crc kubenswrapper[5016]: E1211 11:30:00.157226 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7929b72-fdfe-4703-ab76-e99de50be310" containerName="registry-server"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.157233 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7929b72-fdfe-4703-ab76-e99de50be310" containerName="registry-server"
Dec 11 11:30:00 crc kubenswrapper[5016]: E1211 11:30:00.157250 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7929b72-fdfe-4703-ab76-e99de50be310" containerName="extract-utilities"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.157255 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7929b72-fdfe-4703-ab76-e99de50be310" containerName="extract-utilities"
Dec 11 11:30:00 crc kubenswrapper[5016]: E1211 11:30:00.157267 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34dd8907-425f-4bb2-930f-e41636012eae" containerName="registry-server"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.157272 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="34dd8907-425f-4bb2-930f-e41636012eae" containerName="registry-server"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.157448 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7929b72-fdfe-4703-ab76-e99de50be310" containerName="registry-server"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.157458 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="34dd8907-425f-4bb2-930f-e41636012eae" containerName="registry-server"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.158229 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.161304 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.161314 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.167872 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"]
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.251499 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-secret-volume\") pod \"collect-profiles-29424210-r96s7\" (UID: \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.251619 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-config-volume\") pod \"collect-profiles-29424210-r96s7\" (UID: \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.251651 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrsm6\" (UniqueName: \"kubernetes.io/projected/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-kube-api-access-wrsm6\") pod \"collect-profiles-29424210-r96s7\" (UID: \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.353628 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-config-volume\") pod \"collect-profiles-29424210-r96s7\" (UID: \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.353712 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrsm6\" (UniqueName: \"kubernetes.io/projected/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-kube-api-access-wrsm6\") pod \"collect-profiles-29424210-r96s7\" (UID: \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.353933 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-secret-volume\") pod \"collect-profiles-29424210-r96s7\" (UID: \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.354681 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-config-volume\") pod \"collect-profiles-29424210-r96s7\" (UID: \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.360793 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-secret-volume\") pod \"collect-profiles-29424210-r96s7\" (UID: \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.372531 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrsm6\" (UniqueName: \"kubernetes.io/projected/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-kube-api-access-wrsm6\") pod \"collect-profiles-29424210-r96s7\" (UID: \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.486516 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:00 crc kubenswrapper[5016]: I1211 11:30:00.941081 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"]
Dec 11 11:30:01 crc kubenswrapper[5016]: I1211 11:30:01.159578 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7" event={"ID":"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02","Type":"ContainerStarted","Data":"6750833f724e05fba681b4c6f5874c6c391e1ea90494d5a931cdb883486358d7"}
Dec 11 11:30:01 crc kubenswrapper[5016]: I1211 11:30:01.159636 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7" event={"ID":"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02","Type":"ContainerStarted","Data":"97a05113af80ef52cccf78d7947c8e462db7b9d661e78ab94aa5832f8db2919a"}
Dec 11 11:30:01 crc kubenswrapper[5016]: I1211 11:30:01.184916 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7" podStartSLOduration=1.184893542 podStartE2EDuration="1.184893542s" podCreationTimestamp="2025-12-11 11:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 11:30:01.1794908 +0000 UTC m=+3317.998050389" watchObservedRunningTime="2025-12-11 11:30:01.184893542 +0000 UTC m=+3318.003453121"
Dec 11 11:30:02 crc kubenswrapper[5016]: I1211 11:30:02.168745 5016 generic.go:334] "Generic (PLEG): container finished" podID="e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02" containerID="6750833f724e05fba681b4c6f5874c6c391e1ea90494d5a931cdb883486358d7" exitCode=0
Dec 11 11:30:02 crc kubenswrapper[5016]: I1211 11:30:02.168803 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7" event={"ID":"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02","Type":"ContainerDied","Data":"6750833f724e05fba681b4c6f5874c6c391e1ea90494d5a931cdb883486358d7"}
Dec 11 11:30:03 crc kubenswrapper[5016]: I1211 11:30:03.553912 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:03 crc kubenswrapper[5016]: I1211 11:30:03.719810 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrsm6\" (UniqueName: \"kubernetes.io/projected/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-kube-api-access-wrsm6\") pod \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\" (UID: \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\") "
Dec 11 11:30:03 crc kubenswrapper[5016]: I1211 11:30:03.719924 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-config-volume\") pod \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\" (UID: \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\") "
Dec 11 11:30:03 crc kubenswrapper[5016]: I1211 11:30:03.720546 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-secret-volume\") pod \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\" (UID: \"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02\") "
Dec 11 11:30:03 crc kubenswrapper[5016]: I1211 11:30:03.720485 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-config-volume" (OuterVolumeSpecName: "config-volume") pod "e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02" (UID: "e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 11:30:03 crc kubenswrapper[5016]: I1211 11:30:03.721802 5016 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-config-volume\") on node \"crc\" DevicePath \"\""
Dec 11 11:30:03 crc kubenswrapper[5016]: I1211 11:30:03.726429 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02" (UID: "e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 11:30:03 crc kubenswrapper[5016]: I1211 11:30:03.727065 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-kube-api-access-wrsm6" (OuterVolumeSpecName: "kube-api-access-wrsm6") pod "e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02" (UID: "e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02"). InnerVolumeSpecName "kube-api-access-wrsm6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:30:03 crc kubenswrapper[5016]: I1211 11:30:03.824647 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrsm6\" (UniqueName: \"kubernetes.io/projected/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-kube-api-access-wrsm6\") on node \"crc\" DevicePath \"\""
Dec 11 11:30:03 crc kubenswrapper[5016]: I1211 11:30:03.824721 5016 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02-secret-volume\") on node \"crc\" DevicePath \"\""
Dec 11 11:30:04 crc kubenswrapper[5016]: I1211 11:30:04.188101 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7" event={"ID":"e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02","Type":"ContainerDied","Data":"97a05113af80ef52cccf78d7947c8e462db7b9d661e78ab94aa5832f8db2919a"}
Dec 11 11:30:04 crc kubenswrapper[5016]: I1211 11:30:04.188156 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97a05113af80ef52cccf78d7947c8e462db7b9d661e78ab94aa5832f8db2919a"
Dec 11 11:30:04 crc kubenswrapper[5016]: I1211 11:30:04.188197 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424210-r96s7"
Dec 11 11:30:04 crc kubenswrapper[5016]: I1211 11:30:04.247889 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t"]
Dec 11 11:30:04 crc kubenswrapper[5016]: I1211 11:30:04.256185 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424165-qm22t"]
Dec 11 11:30:05 crc kubenswrapper[5016]: I1211 11:30:05.474772 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:30:05 crc kubenswrapper[5016]: E1211 11:30:05.475371 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:30:05 crc kubenswrapper[5016]: I1211 11:30:05.486860 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f1c3e35-cef5-4190-a349-b4490f3fe796" path="/var/lib/kubelet/pods/2f1c3e35-cef5-4190-a349-b4490f3fe796/volumes"
Dec 11 11:30:12 crc kubenswrapper[5016]: I1211 11:30:12.674961 5016 scope.go:117] "RemoveContainer" containerID="d119c1b272397ca659d78902d5b3912b25e8ec6137dc801785742cdead21bfbf"
Dec 11 11:30:20 crc kubenswrapper[5016]: I1211 11:30:20.475001 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:30:20 crc kubenswrapper[5016]: E1211 11:30:20.475924 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:30:31 crc kubenswrapper[5016]: I1211 11:30:31.475736 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:30:31 crc kubenswrapper[5016]: E1211 11:30:31.476599 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:30:43 crc kubenswrapper[5016]: I1211 11:30:43.485646 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:30:43 crc kubenswrapper[5016]: E1211 11:30:43.486404 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:30:54 crc kubenswrapper[5016]: I1211 11:30:54.474908 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:30:54 crc kubenswrapper[5016]: E1211 11:30:54.478773 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:31:05 crc kubenswrapper[5016]: I1211 11:31:05.474883 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:31:05 crc kubenswrapper[5016]: E1211 11:31:05.475638 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:31:20 crc kubenswrapper[5016]: I1211 11:31:20.474519 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:31:20 crc kubenswrapper[5016]: E1211 11:31:20.475518 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:31:34 crc kubenswrapper[5016]: I1211 11:31:34.486645 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:31:34 crc kubenswrapper[5016]: E1211 11:31:34.495290 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:31:46 crc kubenswrapper[5016]: I1211 11:31:46.474817 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:31:46 crc kubenswrapper[5016]: E1211 11:31:46.475839 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:32:00 crc kubenswrapper[5016]: I1211 11:32:00.474898 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8"
Dec 11 11:32:00 crc kubenswrapper[5016]: E1211 11:32:00.475709 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.436258 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5pd4c"]
Dec 11 11:32:01 crc kubenswrapper[5016]: E1211 11:32:01.437015 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02" containerName="collect-profiles"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.437034 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02" containerName="collect-profiles"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.437220 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0ae4f1f-b8bd-4d77-b79a-ece1fcb08f02" containerName="collect-profiles"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.438962 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5pd4c"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.473467 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5pd4c"]
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.614199 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdll4\" (UniqueName: \"kubernetes.io/projected/12253252-0d50-462c-b62a-3c75482c54a8-kube-api-access-wdll4\") pod \"redhat-marketplace-5pd4c\" (UID: \"12253252-0d50-462c-b62a-3c75482c54a8\") " pod="openshift-marketplace/redhat-marketplace-5pd4c"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.614256 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12253252-0d50-462c-b62a-3c75482c54a8-catalog-content\") pod \"redhat-marketplace-5pd4c\" (UID: \"12253252-0d50-462c-b62a-3c75482c54a8\") " pod="openshift-marketplace/redhat-marketplace-5pd4c"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.614354 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12253252-0d50-462c-b62a-3c75482c54a8-utilities\") pod \"redhat-marketplace-5pd4c\" (UID: \"12253252-0d50-462c-b62a-3c75482c54a8\") " pod="openshift-marketplace/redhat-marketplace-5pd4c"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.715720 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12253252-0d50-462c-b62a-3c75482c54a8-utilities\") pod \"redhat-marketplace-5pd4c\" (UID: \"12253252-0d50-462c-b62a-3c75482c54a8\") " pod="openshift-marketplace/redhat-marketplace-5pd4c"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.715882 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdll4\" (UniqueName: \"kubernetes.io/projected/12253252-0d50-462c-b62a-3c75482c54a8-kube-api-access-wdll4\") pod \"redhat-marketplace-5pd4c\" (UID: \"12253252-0d50-462c-b62a-3c75482c54a8\") " pod="openshift-marketplace/redhat-marketplace-5pd4c"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.715902 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12253252-0d50-462c-b62a-3c75482c54a8-catalog-content\") pod \"redhat-marketplace-5pd4c\" (UID: \"12253252-0d50-462c-b62a-3c75482c54a8\") " pod="openshift-marketplace/redhat-marketplace-5pd4c"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.716310 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12253252-0d50-462c-b62a-3c75482c54a8-utilities\") pod \"redhat-marketplace-5pd4c\" (UID: \"12253252-0d50-462c-b62a-3c75482c54a8\") " pod="openshift-marketplace/redhat-marketplace-5pd4c"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.716340 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12253252-0d50-462c-b62a-3c75482c54a8-catalog-content\") pod \"redhat-marketplace-5pd4c\" (UID: \"12253252-0d50-462c-b62a-3c75482c54a8\") " pod="openshift-marketplace/redhat-marketplace-5pd4c"
Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.748650 5016 operation_generator.go:637] "MountVolume.SetUp
succeeded for volume \"kube-api-access-wdll4\" (UniqueName: \"kubernetes.io/projected/12253252-0d50-462c-b62a-3c75482c54a8-kube-api-access-wdll4\") pod \"redhat-marketplace-5pd4c\" (UID: \"12253252-0d50-462c-b62a-3c75482c54a8\") " pod="openshift-marketplace/redhat-marketplace-5pd4c" Dec 11 11:32:01 crc kubenswrapper[5016]: I1211 11:32:01.762434 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5pd4c" Dec 11 11:32:02 crc kubenswrapper[5016]: I1211 11:32:02.274674 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5pd4c"] Dec 11 11:32:02 crc kubenswrapper[5016]: I1211 11:32:02.288414 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5pd4c" event={"ID":"12253252-0d50-462c-b62a-3c75482c54a8","Type":"ContainerStarted","Data":"17fe72b430343e9bcf9e8c8f9664f25a699951e3645e1ead76c695707c9f7b20"} Dec 11 11:32:03 crc kubenswrapper[5016]: I1211 11:32:03.299366 5016 generic.go:334] "Generic (PLEG): container finished" podID="12253252-0d50-462c-b62a-3c75482c54a8" containerID="a4973709c7b21ee0e1fe2eab36cf844b88e93cbcc0434ceaf09e11e21c21330d" exitCode=0 Dec 11 11:32:03 crc kubenswrapper[5016]: I1211 11:32:03.299485 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5pd4c" event={"ID":"12253252-0d50-462c-b62a-3c75482c54a8","Type":"ContainerDied","Data":"a4973709c7b21ee0e1fe2eab36cf844b88e93cbcc0434ceaf09e11e21c21330d"} Dec 11 11:32:03 crc kubenswrapper[5016]: I1211 11:32:03.303972 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 11:32:05 crc kubenswrapper[5016]: I1211 11:32:05.320100 5016 generic.go:334] "Generic (PLEG): container finished" podID="12253252-0d50-462c-b62a-3c75482c54a8" containerID="1d3514ab3f5b9f7066bee25591b78a0424042e24d1ad17a017ac52241d902bb2" exitCode=0 Dec 11 11:32:05 crc kubenswrapper[5016]: I1211 11:32:05.320221 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5pd4c" event={"ID":"12253252-0d50-462c-b62a-3c75482c54a8","Type":"ContainerDied","Data":"1d3514ab3f5b9f7066bee25591b78a0424042e24d1ad17a017ac52241d902bb2"} Dec 11 11:32:06 crc kubenswrapper[5016]: I1211 11:32:06.334780 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5pd4c" event={"ID":"12253252-0d50-462c-b62a-3c75482c54a8","Type":"ContainerStarted","Data":"02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea"} Dec 11 11:32:11 crc kubenswrapper[5016]: I1211 11:32:11.764547 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5pd4c" Dec 11 11:32:11 crc kubenswrapper[5016]: I1211 11:32:11.765575 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5pd4c" Dec 11 11:32:11 crc kubenswrapper[5016]: I1211 11:32:11.814926 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5pd4c" Dec 11 11:32:11 crc kubenswrapper[5016]: I1211 11:32:11.850187 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5pd4c" podStartSLOduration=8.346152374 podStartE2EDuration="10.850164475s" podCreationTimestamp="2025-12-11 11:32:01 +0000 UTC" firstStartedPulling="2025-12-11 11:32:03.303703014 +0000 UTC 
m=+3440.122262583" lastFinishedPulling="2025-12-11 11:32:05.807715105 +0000 UTC m=+3442.626274684" observedRunningTime="2025-12-11 11:32:06.36666965 +0000 UTC m=+3443.185229269" watchObservedRunningTime="2025-12-11 11:32:11.850164475 +0000 UTC m=+3448.668724054" Dec 11 11:32:12 crc kubenswrapper[5016]: I1211 11:32:12.428420 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5pd4c" Dec 11 11:32:12 crc kubenswrapper[5016]: I1211 11:32:12.479776 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5pd4c"] Dec 11 11:32:14 crc kubenswrapper[5016]: I1211 11:32:14.401628 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5pd4c" podUID="12253252-0d50-462c-b62a-3c75482c54a8" containerName="registry-server" containerID="cri-o://02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea" gracePeriod=2 Dec 11 11:32:14 crc kubenswrapper[5016]: I1211 11:32:14.920340 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5pd4c" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.125489 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12253252-0d50-462c-b62a-3c75482c54a8-utilities\") pod \"12253252-0d50-462c-b62a-3c75482c54a8\" (UID: \"12253252-0d50-462c-b62a-3c75482c54a8\") " Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.125632 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdll4\" (UniqueName: \"kubernetes.io/projected/12253252-0d50-462c-b62a-3c75482c54a8-kube-api-access-wdll4\") pod \"12253252-0d50-462c-b62a-3c75482c54a8\" (UID: \"12253252-0d50-462c-b62a-3c75482c54a8\") " Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.125746 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12253252-0d50-462c-b62a-3c75482c54a8-catalog-content\") pod \"12253252-0d50-462c-b62a-3c75482c54a8\" (UID: \"12253252-0d50-462c-b62a-3c75482c54a8\") " Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.126838 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12253252-0d50-462c-b62a-3c75482c54a8-utilities" (OuterVolumeSpecName: "utilities") pod "12253252-0d50-462c-b62a-3c75482c54a8" (UID: "12253252-0d50-462c-b62a-3c75482c54a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.136019 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12253252-0d50-462c-b62a-3c75482c54a8-kube-api-access-wdll4" (OuterVolumeSpecName: "kube-api-access-wdll4") pod "12253252-0d50-462c-b62a-3c75482c54a8" (UID: "12253252-0d50-462c-b62a-3c75482c54a8"). InnerVolumeSpecName "kube-api-access-wdll4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.154988 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12253252-0d50-462c-b62a-3c75482c54a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12253252-0d50-462c-b62a-3c75482c54a8" (UID: "12253252-0d50-462c-b62a-3c75482c54a8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.228641 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12253252-0d50-462c-b62a-3c75482c54a8-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.228694 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdll4\" (UniqueName: \"kubernetes.io/projected/12253252-0d50-462c-b62a-3c75482c54a8-kube-api-access-wdll4\") on node \"crc\" DevicePath \"\"" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.228713 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12253252-0d50-462c-b62a-3c75482c54a8-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.411591 5016 generic.go:334] "Generic (PLEG): container finished" podID="12253252-0d50-462c-b62a-3c75482c54a8" containerID="02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea" exitCode=0 Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.411632 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5pd4c" event={"ID":"12253252-0d50-462c-b62a-3c75482c54a8","Type":"ContainerDied","Data":"02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea"} Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.411657 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5pd4c" event={"ID":"12253252-0d50-462c-b62a-3c75482c54a8","Type":"ContainerDied","Data":"17fe72b430343e9bcf9e8c8f9664f25a699951e3645e1ead76c695707c9f7b20"} Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.411662 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5pd4c" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.411673 5016 scope.go:117] "RemoveContainer" containerID="02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.444250 5016 scope.go:117] "RemoveContainer" containerID="1d3514ab3f5b9f7066bee25591b78a0424042e24d1ad17a017ac52241d902bb2" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.452780 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5pd4c"] Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.466557 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5pd4c"] Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.466807 5016 scope.go:117] "RemoveContainer" containerID="a4973709c7b21ee0e1fe2eab36cf844b88e93cbcc0434ceaf09e11e21c21330d" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.475616 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.498363 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12253252-0d50-462c-b62a-3c75482c54a8" path="/var/lib/kubelet/pods/12253252-0d50-462c-b62a-3c75482c54a8/volumes" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.525644 5016 scope.go:117] "RemoveContainer" containerID="02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea" Dec 11 11:32:15 crc kubenswrapper[5016]: E1211 11:32:15.526354 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea\": container with ID starting with 02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea not found: ID does not exist" containerID="02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.526402 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea"} err="failed to get container status \"02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea\": rpc error: code = NotFound desc = could not find container \"02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea\": container with ID starting with 02609022c70bf3d27e82962083319fa3fed84512907b171b4fba72ed1a90e9ea not found: ID does not exist" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.526429 5016 scope.go:117] "RemoveContainer" containerID="1d3514ab3f5b9f7066bee25591b78a0424042e24d1ad17a017ac52241d902bb2" Dec 11 11:32:15 crc kubenswrapper[5016]: E1211 11:32:15.526971 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d3514ab3f5b9f7066bee25591b78a0424042e24d1ad17a017ac52241d902bb2\": container with ID starting with 1d3514ab3f5b9f7066bee25591b78a0424042e24d1ad17a017ac52241d902bb2 not found: ID does not exist" containerID="1d3514ab3f5b9f7066bee25591b78a0424042e24d1ad17a017ac52241d902bb2" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.527016 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d3514ab3f5b9f7066bee25591b78a0424042e24d1ad17a017ac52241d902bb2"} err="failed to get container status 
\"1d3514ab3f5b9f7066bee25591b78a0424042e24d1ad17a017ac52241d902bb2\": rpc error: code = NotFound desc = could not find container \"1d3514ab3f5b9f7066bee25591b78a0424042e24d1ad17a017ac52241d902bb2\": container with ID starting with 1d3514ab3f5b9f7066bee25591b78a0424042e24d1ad17a017ac52241d902bb2 not found: ID does not exist" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.527059 5016 scope.go:117] "RemoveContainer" containerID="a4973709c7b21ee0e1fe2eab36cf844b88e93cbcc0434ceaf09e11e21c21330d" Dec 11 11:32:15 crc kubenswrapper[5016]: E1211 11:32:15.527422 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4973709c7b21ee0e1fe2eab36cf844b88e93cbcc0434ceaf09e11e21c21330d\": container with ID starting with a4973709c7b21ee0e1fe2eab36cf844b88e93cbcc0434ceaf09e11e21c21330d not found: ID does not exist" containerID="a4973709c7b21ee0e1fe2eab36cf844b88e93cbcc0434ceaf09e11e21c21330d" Dec 11 11:32:15 crc kubenswrapper[5016]: I1211 11:32:15.527457 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4973709c7b21ee0e1fe2eab36cf844b88e93cbcc0434ceaf09e11e21c21330d"} err="failed to get container status \"a4973709c7b21ee0e1fe2eab36cf844b88e93cbcc0434ceaf09e11e21c21330d\": rpc error: code = NotFound desc = could not find container \"a4973709c7b21ee0e1fe2eab36cf844b88e93cbcc0434ceaf09e11e21c21330d\": container with ID starting with a4973709c7b21ee0e1fe2eab36cf844b88e93cbcc0434ceaf09e11e21c21330d not found: ID does not exist" Dec 11 11:32:16 crc kubenswrapper[5016]: I1211 11:32:16.422482 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"bb4758d65cf21f849ad86ea304787576e73defababc5c9ad45257a22b121f4c9"} Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.035201 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kbcvx"] Dec 11 11:32:58 crc kubenswrapper[5016]: E1211 11:32:58.042479 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12253252-0d50-462c-b62a-3c75482c54a8" containerName="extract-utilities" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.042501 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="12253252-0d50-462c-b62a-3c75482c54a8" containerName="extract-utilities" Dec 11 11:32:58 crc kubenswrapper[5016]: E1211 11:32:58.042532 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12253252-0d50-462c-b62a-3c75482c54a8" containerName="extract-content" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.042541 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="12253252-0d50-462c-b62a-3c75482c54a8" containerName="extract-content" Dec 11 11:32:58 crc kubenswrapper[5016]: E1211 11:32:58.042564 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12253252-0d50-462c-b62a-3c75482c54a8" containerName="registry-server" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.042573 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="12253252-0d50-462c-b62a-3c75482c54a8" containerName="registry-server" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.042807 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="12253252-0d50-462c-b62a-3c75482c54a8" containerName="registry-server" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.044667 5016 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.048744 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kbcvx"] Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.113989 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbhpz\" (UniqueName: \"kubernetes.io/projected/daf284f1-abec-4b10-9857-94ce27403759-kube-api-access-pbhpz\") pod \"certified-operators-kbcvx\" (UID: \"daf284f1-abec-4b10-9857-94ce27403759\") " pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.114257 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf284f1-abec-4b10-9857-94ce27403759-catalog-content\") pod \"certified-operators-kbcvx\" (UID: \"daf284f1-abec-4b10-9857-94ce27403759\") " pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.114389 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf284f1-abec-4b10-9857-94ce27403759-utilities\") pod \"certified-operators-kbcvx\" (UID: \"daf284f1-abec-4b10-9857-94ce27403759\") " pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.215613 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf284f1-abec-4b10-9857-94ce27403759-catalog-content\") pod \"certified-operators-kbcvx\" (UID: \"daf284f1-abec-4b10-9857-94ce27403759\") " pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.215718 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf284f1-abec-4b10-9857-94ce27403759-utilities\") pod \"certified-operators-kbcvx\" (UID: \"daf284f1-abec-4b10-9857-94ce27403759\") " pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.215838 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbhpz\" (UniqueName: \"kubernetes.io/projected/daf284f1-abec-4b10-9857-94ce27403759-kube-api-access-pbhpz\") pod \"certified-operators-kbcvx\" (UID: \"daf284f1-abec-4b10-9857-94ce27403759\") " pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.216186 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf284f1-abec-4b10-9857-94ce27403759-utilities\") pod \"certified-operators-kbcvx\" (UID: \"daf284f1-abec-4b10-9857-94ce27403759\") " pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.216547 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf284f1-abec-4b10-9857-94ce27403759-catalog-content\") pod \"certified-operators-kbcvx\" (UID: \"daf284f1-abec-4b10-9857-94ce27403759\") " pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.236857 5016 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbhpz\" (UniqueName: \"kubernetes.io/projected/daf284f1-abec-4b10-9857-94ce27403759-kube-api-access-pbhpz\") pod \"certified-operators-kbcvx\" (UID: \"daf284f1-abec-4b10-9857-94ce27403759\") " pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.363376 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.855468 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kbcvx"] Dec 11 11:32:58 crc kubenswrapper[5016]: I1211 11:32:58.898263 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kbcvx" event={"ID":"daf284f1-abec-4b10-9857-94ce27403759","Type":"ContainerStarted","Data":"0e48e272ef91d7d624916504172d9716e7dd9da0f23fd082fdc6bd7f63245fb3"} Dec 11 11:32:59 crc kubenswrapper[5016]: I1211 11:32:59.908213 5016 generic.go:334] "Generic (PLEG): container finished" podID="daf284f1-abec-4b10-9857-94ce27403759" containerID="ca4c0b60172c2b59e93a89483a4cae91533f868d5f6a32c14165cc33f54873fb" exitCode=0 Dec 11 11:32:59 crc kubenswrapper[5016]: I1211 11:32:59.908259 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kbcvx" event={"ID":"daf284f1-abec-4b10-9857-94ce27403759","Type":"ContainerDied","Data":"ca4c0b60172c2b59e93a89483a4cae91533f868d5f6a32c14165cc33f54873fb"} Dec 11 11:33:00 crc kubenswrapper[5016]: I1211 11:33:00.918917 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kbcvx" event={"ID":"daf284f1-abec-4b10-9857-94ce27403759","Type":"ContainerStarted","Data":"3502b80f54660f1968cc205f8da8a8d81bd624e4856f41c2760850f6f108965c"} Dec 11 11:33:02 crc kubenswrapper[5016]: I1211 11:33:02.940568 5016 generic.go:334] "Generic (PLEG): container finished" podID="daf284f1-abec-4b10-9857-94ce27403759" containerID="3502b80f54660f1968cc205f8da8a8d81bd624e4856f41c2760850f6f108965c" exitCode=0 Dec 11 11:33:02 crc kubenswrapper[5016]: I1211 11:33:02.940687 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kbcvx" event={"ID":"daf284f1-abec-4b10-9857-94ce27403759","Type":"ContainerDied","Data":"3502b80f54660f1968cc205f8da8a8d81bd624e4856f41c2760850f6f108965c"} Dec 11 11:33:03 crc kubenswrapper[5016]: I1211 11:33:03.956217 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kbcvx" event={"ID":"daf284f1-abec-4b10-9857-94ce27403759","Type":"ContainerStarted","Data":"5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899"} Dec 11 11:33:03 crc kubenswrapper[5016]: I1211 11:33:03.982743 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kbcvx" podStartSLOduration=2.529129962 podStartE2EDuration="5.98272549s" podCreationTimestamp="2025-12-11 11:32:58 +0000 UTC" firstStartedPulling="2025-12-11 11:32:59.910526151 +0000 UTC m=+3496.729085730" lastFinishedPulling="2025-12-11 11:33:03.364121679 +0000 UTC m=+3500.182681258" observedRunningTime="2025-12-11 11:33:03.977105962 +0000 UTC m=+3500.795665541" watchObservedRunningTime="2025-12-11 11:33:03.98272549 +0000 UTC m=+3500.801285069" Dec 11 11:33:08 crc kubenswrapper[5016]: I1211 11:33:08.364507 5016 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:33:08 crc kubenswrapper[5016]: I1211 11:33:08.366639 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:33:08 crc kubenswrapper[5016]: I1211 11:33:08.421078 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:33:09 crc kubenswrapper[5016]: I1211 11:33:09.064858 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:33:09 crc kubenswrapper[5016]: I1211 11:33:09.136417 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kbcvx"] Dec 11 11:33:11 crc kubenswrapper[5016]: I1211 11:33:11.030292 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kbcvx" podUID="daf284f1-abec-4b10-9857-94ce27403759" containerName="registry-server" containerID="cri-o://5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899" gracePeriod=2 Dec 11 11:33:11 crc kubenswrapper[5016]: I1211 11:33:11.555054 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:33:11 crc kubenswrapper[5016]: I1211 11:33:11.720297 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf284f1-abec-4b10-9857-94ce27403759-utilities\") pod \"daf284f1-abec-4b10-9857-94ce27403759\" (UID: \"daf284f1-abec-4b10-9857-94ce27403759\") " Dec 11 11:33:11 crc kubenswrapper[5016]: I1211 11:33:11.720403 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbhpz\" (UniqueName: \"kubernetes.io/projected/daf284f1-abec-4b10-9857-94ce27403759-kube-api-access-pbhpz\") pod \"daf284f1-abec-4b10-9857-94ce27403759\" (UID: \"daf284f1-abec-4b10-9857-94ce27403759\") " Dec 11 11:33:11 crc kubenswrapper[5016]: I1211 11:33:11.720499 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf284f1-abec-4b10-9857-94ce27403759-catalog-content\") pod \"daf284f1-abec-4b10-9857-94ce27403759\" (UID: \"daf284f1-abec-4b10-9857-94ce27403759\") " Dec 11 11:33:11 crc kubenswrapper[5016]: I1211 11:33:11.721331 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/daf284f1-abec-4b10-9857-94ce27403759-utilities" (OuterVolumeSpecName: "utilities") pod "daf284f1-abec-4b10-9857-94ce27403759" (UID: "daf284f1-abec-4b10-9857-94ce27403759"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:33:11 crc kubenswrapper[5016]: I1211 11:33:11.729048 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daf284f1-abec-4b10-9857-94ce27403759-kube-api-access-pbhpz" (OuterVolumeSpecName: "kube-api-access-pbhpz") pod "daf284f1-abec-4b10-9857-94ce27403759" (UID: "daf284f1-abec-4b10-9857-94ce27403759"). InnerVolumeSpecName "kube-api-access-pbhpz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:33:11 crc kubenswrapper[5016]: I1211 11:33:11.786750 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/daf284f1-abec-4b10-9857-94ce27403759-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "daf284f1-abec-4b10-9857-94ce27403759" (UID: "daf284f1-abec-4b10-9857-94ce27403759"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:33:11 crc kubenswrapper[5016]: I1211 11:33:11.823279 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf284f1-abec-4b10-9857-94ce27403759-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:33:11 crc kubenswrapper[5016]: I1211 11:33:11.823325 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbhpz\" (UniqueName: \"kubernetes.io/projected/daf284f1-abec-4b10-9857-94ce27403759-kube-api-access-pbhpz\") on node \"crc\" DevicePath \"\"" Dec 11 11:33:11 crc kubenswrapper[5016]: I1211 11:33:11.823345 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf284f1-abec-4b10-9857-94ce27403759-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.069509 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kbcvx" Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.069525 5016 generic.go:334] "Generic (PLEG): container finished" podID="daf284f1-abec-4b10-9857-94ce27403759" containerID="5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899" exitCode=0 Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.069594 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kbcvx" event={"ID":"daf284f1-abec-4b10-9857-94ce27403759","Type":"ContainerDied","Data":"5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899"} Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.069632 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kbcvx" event={"ID":"daf284f1-abec-4b10-9857-94ce27403759","Type":"ContainerDied","Data":"0e48e272ef91d7d624916504172d9716e7dd9da0f23fd082fdc6bd7f63245fb3"} Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.069654 5016 scope.go:117] "RemoveContainer" containerID="5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899" Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.109731 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kbcvx"] Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.117734 5016 scope.go:117] "RemoveContainer" containerID="3502b80f54660f1968cc205f8da8a8d81bd624e4856f41c2760850f6f108965c" Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.122682 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kbcvx"] Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.139825 5016 scope.go:117] "RemoveContainer" containerID="ca4c0b60172c2b59e93a89483a4cae91533f868d5f6a32c14165cc33f54873fb" Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.195161 5016 scope.go:117] "RemoveContainer" containerID="5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899" Dec 11 11:33:12 crc kubenswrapper[5016]: E1211 11:33:12.195677 5016 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899\": container with ID starting with 5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899 not found: ID does not exist" containerID="5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899" Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.195846 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899"} err="failed to get container status \"5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899\": rpc error: code = NotFound desc = could not find container \"5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899\": container with ID starting with 5bd92bf14da8a397229c39222a974d06e79ea8948c7c2dc35670c050e887a899 not found: ID does not exist" Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.195988 5016 scope.go:117] "RemoveContainer" containerID="3502b80f54660f1968cc205f8da8a8d81bd624e4856f41c2760850f6f108965c" Dec 11 11:33:12 crc kubenswrapper[5016]: E1211 11:33:12.196615 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3502b80f54660f1968cc205f8da8a8d81bd624e4856f41c2760850f6f108965c\": container with ID starting with 3502b80f54660f1968cc205f8da8a8d81bd624e4856f41c2760850f6f108965c not found: ID does not exist" containerID="3502b80f54660f1968cc205f8da8a8d81bd624e4856f41c2760850f6f108965c" Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.196647 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3502b80f54660f1968cc205f8da8a8d81bd624e4856f41c2760850f6f108965c"} err="failed to get container status \"3502b80f54660f1968cc205f8da8a8d81bd624e4856f41c2760850f6f108965c\": rpc error: code = NotFound desc = could not find container \"3502b80f54660f1968cc205f8da8a8d81bd624e4856f41c2760850f6f108965c\": container with ID starting with 3502b80f54660f1968cc205f8da8a8d81bd624e4856f41c2760850f6f108965c not found: ID does not exist" Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.196666 5016 scope.go:117] "RemoveContainer" containerID="ca4c0b60172c2b59e93a89483a4cae91533f868d5f6a32c14165cc33f54873fb" Dec 11 11:33:12 crc kubenswrapper[5016]: E1211 11:33:12.197064 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca4c0b60172c2b59e93a89483a4cae91533f868d5f6a32c14165cc33f54873fb\": container with ID starting with ca4c0b60172c2b59e93a89483a4cae91533f868d5f6a32c14165cc33f54873fb not found: ID does not exist" containerID="ca4c0b60172c2b59e93a89483a4cae91533f868d5f6a32c14165cc33f54873fb" Dec 11 11:33:12 crc kubenswrapper[5016]: I1211 11:33:12.197234 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca4c0b60172c2b59e93a89483a4cae91533f868d5f6a32c14165cc33f54873fb"} err="failed to get container status \"ca4c0b60172c2b59e93a89483a4cae91533f868d5f6a32c14165cc33f54873fb\": rpc error: code = NotFound desc = could not find container \"ca4c0b60172c2b59e93a89483a4cae91533f868d5f6a32c14165cc33f54873fb\": container with ID starting with ca4c0b60172c2b59e93a89483a4cae91533f868d5f6a32c14165cc33f54873fb not found: ID does not exist" Dec 11 11:33:13 crc kubenswrapper[5016]: I1211 11:33:13.496296 5016 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="daf284f1-abec-4b10-9857-94ce27403759" path="/var/lib/kubelet/pods/daf284f1-abec-4b10-9857-94ce27403759/volumes" Dec 11 11:34:42 crc kubenswrapper[5016]: I1211 11:34:42.933634 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:34:42 crc kubenswrapper[5016]: I1211 11:34:42.935373 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:35:12 crc kubenswrapper[5016]: I1211 11:35:12.932640 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:35:12 crc kubenswrapper[5016]: I1211 11:35:12.935187 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:35:42 crc kubenswrapper[5016]: I1211 11:35:42.932626 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:35:42 crc kubenswrapper[5016]: I1211 11:35:42.933391 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:35:42 crc kubenswrapper[5016]: I1211 11:35:42.933464 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 11:35:42 crc kubenswrapper[5016]: I1211 11:35:42.934610 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bb4758d65cf21f849ad86ea304787576e73defababc5c9ad45257a22b121f4c9"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 11:35:42 crc kubenswrapper[5016]: I1211 11:35:42.934712 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://bb4758d65cf21f849ad86ea304787576e73defababc5c9ad45257a22b121f4c9" gracePeriod=600 Dec 11 11:35:43 crc kubenswrapper[5016]: I1211 11:35:43.748296 5016 generic.go:334] "Generic (PLEG): container finished" 
podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="bb4758d65cf21f849ad86ea304787576e73defababc5c9ad45257a22b121f4c9" exitCode=0 Dec 11 11:35:43 crc kubenswrapper[5016]: I1211 11:35:43.748380 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"bb4758d65cf21f849ad86ea304787576e73defababc5c9ad45257a22b121f4c9"} Dec 11 11:35:43 crc kubenswrapper[5016]: I1211 11:35:43.748963 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"} Dec 11 11:35:43 crc kubenswrapper[5016]: I1211 11:35:43.748988 5016 scope.go:117] "RemoveContainer" containerID="14130bfe3c616be3f06ca6fa53723f7ea8c547b3982f8049bc0b4932c83af1d8" Dec 11 11:36:17 crc kubenswrapper[5016]: I1211 11:36:17.152501 5016 generic.go:334] "Generic (PLEG): container finished" podID="d9613e90-5366-4f68-80dd-f66a7541a670" containerID="1fa15149d2490ac1839de31b01d3939635b23b0f98e6ffc16a73f4d0f7cc2726" exitCode=0 Dec 11 11:36:17 crc kubenswrapper[5016]: I1211 11:36:17.152613 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"d9613e90-5366-4f68-80dd-f66a7541a670","Type":"ContainerDied","Data":"1fa15149d2490ac1839de31b01d3939635b23b0f98e6ffc16a73f4d0f7cc2726"} Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.594062 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.642373 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/d9613e90-5366-4f68-80dd-f66a7541a670-test-operator-ephemeral-temporary\") pod \"d9613e90-5366-4f68-80dd-f66a7541a670\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.642472 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-ca-certs\") pod \"d9613e90-5366-4f68-80dd-f66a7541a670\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.642500 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-ssh-key\") pod \"d9613e90-5366-4f68-80dd-f66a7541a670\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.642523 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d9613e90-5366-4f68-80dd-f66a7541a670-openstack-config\") pod \"d9613e90-5366-4f68-80dd-f66a7541a670\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.642545 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-openstack-config-secret\") pod \"d9613e90-5366-4f68-80dd-f66a7541a670\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " Dec 11 11:36:18 crc 
kubenswrapper[5016]: I1211 11:36:18.642561 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7h7g\" (UniqueName: \"kubernetes.io/projected/d9613e90-5366-4f68-80dd-f66a7541a670-kube-api-access-b7h7g\") pod \"d9613e90-5366-4f68-80dd-f66a7541a670\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.643367 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9613e90-5366-4f68-80dd-f66a7541a670-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "d9613e90-5366-4f68-80dd-f66a7541a670" (UID: "d9613e90-5366-4f68-80dd-f66a7541a670"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.642594 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/d9613e90-5366-4f68-80dd-f66a7541a670-test-operator-ephemeral-workdir\") pod \"d9613e90-5366-4f68-80dd-f66a7541a670\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.649278 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d9613e90-5366-4f68-80dd-f66a7541a670-config-data\") pod \"d9613e90-5366-4f68-80dd-f66a7541a670\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.649321 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"d9613e90-5366-4f68-80dd-f66a7541a670\" (UID: \"d9613e90-5366-4f68-80dd-f66a7541a670\") " Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.649404 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9613e90-5366-4f68-80dd-f66a7541a670-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "d9613e90-5366-4f68-80dd-f66a7541a670" (UID: "d9613e90-5366-4f68-80dd-f66a7541a670"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.649583 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9613e90-5366-4f68-80dd-f66a7541a670-kube-api-access-b7h7g" (OuterVolumeSpecName: "kube-api-access-b7h7g") pod "d9613e90-5366-4f68-80dd-f66a7541a670" (UID: "d9613e90-5366-4f68-80dd-f66a7541a670"). InnerVolumeSpecName "kube-api-access-b7h7g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.650230 5016 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/d9613e90-5366-4f68-80dd-f66a7541a670-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.650258 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7h7g\" (UniqueName: \"kubernetes.io/projected/d9613e90-5366-4f68-80dd-f66a7541a670-kube-api-access-b7h7g\") on node \"crc\" DevicePath \"\"" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.650271 5016 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/d9613e90-5366-4f68-80dd-f66a7541a670-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.650328 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9613e90-5366-4f68-80dd-f66a7541a670-config-data" (OuterVolumeSpecName: "config-data") pod "d9613e90-5366-4f68-80dd-f66a7541a670" (UID: "d9613e90-5366-4f68-80dd-f66a7541a670"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.653420 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "test-operator-logs") pod "d9613e90-5366-4f68-80dd-f66a7541a670" (UID: "d9613e90-5366-4f68-80dd-f66a7541a670"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.673470 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "d9613e90-5366-4f68-80dd-f66a7541a670" (UID: "d9613e90-5366-4f68-80dd-f66a7541a670"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.674976 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d9613e90-5366-4f68-80dd-f66a7541a670" (UID: "d9613e90-5366-4f68-80dd-f66a7541a670"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.678583 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "d9613e90-5366-4f68-80dd-f66a7541a670" (UID: "d9613e90-5366-4f68-80dd-f66a7541a670"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.704882 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9613e90-5366-4f68-80dd-f66a7541a670-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "d9613e90-5366-4f68-80dd-f66a7541a670" (UID: "d9613e90-5366-4f68-80dd-f66a7541a670"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.751065 5016 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-ca-certs\") on node \"crc\" DevicePath \"\"" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.751093 5016 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.751102 5016 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d9613e90-5366-4f68-80dd-f66a7541a670-openstack-config\") on node \"crc\" DevicePath \"\"" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.751113 5016 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d9613e90-5366-4f68-80dd-f66a7541a670-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.751122 5016 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d9613e90-5366-4f68-80dd-f66a7541a670-config-data\") on node \"crc\" DevicePath \"\"" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.751153 5016 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.771461 5016 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Dec 11 11:36:18 crc kubenswrapper[5016]: I1211 11:36:18.852464 5016 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Dec 11 11:36:19 crc kubenswrapper[5016]: I1211 11:36:19.173361 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"d9613e90-5366-4f68-80dd-f66a7541a670","Type":"ContainerDied","Data":"45bccf0c4071558dbe16f6fcf48ed1b06d5ed54d34d26a8c681d7cb8b4ce73f5"} Dec 11 11:36:19 crc kubenswrapper[5016]: I1211 11:36:19.173699 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45bccf0c4071558dbe16f6fcf48ed1b06d5ed54d34d26a8c681d7cb8b4ce73f5" Dec 11 11:36:19 crc kubenswrapper[5016]: I1211 11:36:19.173407 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.090350 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Dec 11 11:36:31 crc kubenswrapper[5016]: E1211 11:36:31.091256 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9613e90-5366-4f68-80dd-f66a7541a670" containerName="tempest-tests-tempest-tests-runner" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.091269 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9613e90-5366-4f68-80dd-f66a7541a670" containerName="tempest-tests-tempest-tests-runner" Dec 11 11:36:31 crc kubenswrapper[5016]: E1211 11:36:31.091287 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daf284f1-abec-4b10-9857-94ce27403759" containerName="registry-server" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.091293 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="daf284f1-abec-4b10-9857-94ce27403759" containerName="registry-server" Dec 11 11:36:31 crc kubenswrapper[5016]: E1211 11:36:31.091301 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daf284f1-abec-4b10-9857-94ce27403759" containerName="extract-utilities" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.091309 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="daf284f1-abec-4b10-9857-94ce27403759" containerName="extract-utilities" Dec 11 11:36:31 crc kubenswrapper[5016]: E1211 11:36:31.091316 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daf284f1-abec-4b10-9857-94ce27403759" containerName="extract-content" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.091322 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="daf284f1-abec-4b10-9857-94ce27403759" containerName="extract-content" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.091518 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="daf284f1-abec-4b10-9857-94ce27403759" containerName="registry-server" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.091548 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9613e90-5366-4f68-80dd-f66a7541a670" containerName="tempest-tests-tempest-tests-runner" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.097666 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.107832 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-dwb7v" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.140099 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.211635 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"51a74a7d-9731-443f-b85f-99645084064a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.211696 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjffj\" (UniqueName: \"kubernetes.io/projected/51a74a7d-9731-443f-b85f-99645084064a-kube-api-access-sjffj\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"51a74a7d-9731-443f-b85f-99645084064a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.314634 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"51a74a7d-9731-443f-b85f-99645084064a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.314729 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjffj\" (UniqueName: \"kubernetes.io/projected/51a74a7d-9731-443f-b85f-99645084064a-kube-api-access-sjffj\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"51a74a7d-9731-443f-b85f-99645084064a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.315819 5016 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"51a74a7d-9731-443f-b85f-99645084064a\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.337182 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjffj\" (UniqueName: \"kubernetes.io/projected/51a74a7d-9731-443f-b85f-99645084064a-kube-api-access-sjffj\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"51a74a7d-9731-443f-b85f-99645084064a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.358854 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"51a74a7d-9731-443f-b85f-99645084064a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 11:36:31 crc 
kubenswrapper[5016]: I1211 11:36:31.435497 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Dec 11 11:36:31 crc kubenswrapper[5016]: I1211 11:36:31.904272 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Dec 11 11:36:32 crc kubenswrapper[5016]: I1211 11:36:32.293425 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"51a74a7d-9731-443f-b85f-99645084064a","Type":"ContainerStarted","Data":"2055693c1c383153a1aa9c90cba53eb3829af06e7669d335f136f5f72a27a0fa"} Dec 11 11:36:34 crc kubenswrapper[5016]: I1211 11:36:34.317874 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"51a74a7d-9731-443f-b85f-99645084064a","Type":"ContainerStarted","Data":"5a87ef9958715ba0ce5a85052e48ac313ac2b24fbd64fcebf9918f681d0bf3d2"} Dec 11 11:36:34 crc kubenswrapper[5016]: I1211 11:36:34.335206 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=1.495893086 podStartE2EDuration="3.335179053s" podCreationTimestamp="2025-12-11 11:36:31 +0000 UTC" firstStartedPulling="2025-12-11 11:36:31.908201494 +0000 UTC m=+3708.726761073" lastFinishedPulling="2025-12-11 11:36:33.747487461 +0000 UTC m=+3710.566047040" observedRunningTime="2025-12-11 11:36:34.334217089 +0000 UTC m=+3711.152776728" watchObservedRunningTime="2025-12-11 11:36:34.335179053 +0000 UTC m=+3711.153738652" Dec 11 11:36:57 crc kubenswrapper[5016]: I1211 11:36:57.991135 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pq25m/must-gather-sfprq"] Dec 11 11:36:57 crc kubenswrapper[5016]: I1211 11:36:57.994050 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pq25m/must-gather-sfprq" Dec 11 11:36:58 crc kubenswrapper[5016]: I1211 11:36:58.002374 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-pq25m"/"kube-root-ca.crt" Dec 11 11:36:58 crc kubenswrapper[5016]: I1211 11:36:58.002561 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-pq25m"/"openshift-service-ca.crt" Dec 11 11:36:58 crc kubenswrapper[5016]: I1211 11:36:58.022755 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-pq25m/must-gather-sfprq"] Dec 11 11:36:58 crc kubenswrapper[5016]: I1211 11:36:58.131352 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nn8jl\" (UniqueName: \"kubernetes.io/projected/080905d3-b916-4151-b81b-b7b30fe7c291-kube-api-access-nn8jl\") pod \"must-gather-sfprq\" (UID: \"080905d3-b916-4151-b81b-b7b30fe7c291\") " pod="openshift-must-gather-pq25m/must-gather-sfprq" Dec 11 11:36:58 crc kubenswrapper[5016]: I1211 11:36:58.131497 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/080905d3-b916-4151-b81b-b7b30fe7c291-must-gather-output\") pod \"must-gather-sfprq\" (UID: \"080905d3-b916-4151-b81b-b7b30fe7c291\") " pod="openshift-must-gather-pq25m/must-gather-sfprq" Dec 11 11:36:58 crc kubenswrapper[5016]: I1211 11:36:58.233048 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/080905d3-b916-4151-b81b-b7b30fe7c291-must-gather-output\") pod \"must-gather-sfprq\" (UID: \"080905d3-b916-4151-b81b-b7b30fe7c291\") " pod="openshift-must-gather-pq25m/must-gather-sfprq" Dec 11 11:36:58 crc kubenswrapper[5016]: I1211 11:36:58.233155 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nn8jl\" (UniqueName: \"kubernetes.io/projected/080905d3-b916-4151-b81b-b7b30fe7c291-kube-api-access-nn8jl\") pod \"must-gather-sfprq\" (UID: \"080905d3-b916-4151-b81b-b7b30fe7c291\") " pod="openshift-must-gather-pq25m/must-gather-sfprq" Dec 11 11:36:58 crc kubenswrapper[5016]: I1211 11:36:58.233704 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/080905d3-b916-4151-b81b-b7b30fe7c291-must-gather-output\") pod \"must-gather-sfprq\" (UID: \"080905d3-b916-4151-b81b-b7b30fe7c291\") " pod="openshift-must-gather-pq25m/must-gather-sfprq" Dec 11 11:36:58 crc kubenswrapper[5016]: I1211 11:36:58.259378 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nn8jl\" (UniqueName: \"kubernetes.io/projected/080905d3-b916-4151-b81b-b7b30fe7c291-kube-api-access-nn8jl\") pod \"must-gather-sfprq\" (UID: \"080905d3-b916-4151-b81b-b7b30fe7c291\") " pod="openshift-must-gather-pq25m/must-gather-sfprq" Dec 11 11:36:58 crc kubenswrapper[5016]: I1211 11:36:58.322780 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pq25m/must-gather-sfprq" Dec 11 11:36:58 crc kubenswrapper[5016]: I1211 11:36:58.811617 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-pq25m/must-gather-sfprq"] Dec 11 11:36:59 crc kubenswrapper[5016]: I1211 11:36:59.594862 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pq25m/must-gather-sfprq" event={"ID":"080905d3-b916-4151-b81b-b7b30fe7c291","Type":"ContainerStarted","Data":"e69f8be6b25bf16046372ee74a37e8c2bd34b01cf2363c7ea000ec4b6cbf77b7"} Dec 11 11:37:06 crc kubenswrapper[5016]: I1211 11:37:06.695606 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pq25m/must-gather-sfprq" event={"ID":"080905d3-b916-4151-b81b-b7b30fe7c291","Type":"ContainerStarted","Data":"8dae247535b83517bf7568847b201f9618870283ccba7ca98bce54b3770e13f7"} Dec 11 11:37:06 crc kubenswrapper[5016]: I1211 11:37:06.696353 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pq25m/must-gather-sfprq" event={"ID":"080905d3-b916-4151-b81b-b7b30fe7c291","Type":"ContainerStarted","Data":"f49ce6bf40755ad3151fe6c669bb24699599a2dea94483849668ecc99388663c"} Dec 11 11:37:06 crc kubenswrapper[5016]: I1211 11:37:06.716311 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pq25m/must-gather-sfprq" podStartSLOduration=2.453886711 podStartE2EDuration="9.71629086s" podCreationTimestamp="2025-12-11 11:36:57 +0000 UTC" firstStartedPulling="2025-12-11 11:36:58.819458122 +0000 UTC m=+3735.638017701" lastFinishedPulling="2025-12-11 11:37:06.081862271 +0000 UTC m=+3742.900421850" observedRunningTime="2025-12-11 11:37:06.7118007 +0000 UTC m=+3743.530360289" watchObservedRunningTime="2025-12-11 11:37:06.71629086 +0000 UTC m=+3743.534850439" Dec 11 11:37:10 crc kubenswrapper[5016]: I1211 11:37:10.204591 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pq25m/crc-debug-7dmlt"] Dec 11 11:37:10 crc kubenswrapper[5016]: I1211 11:37:10.206728 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pq25m/crc-debug-7dmlt" Dec 11 11:37:10 crc kubenswrapper[5016]: I1211 11:37:10.209693 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-pq25m"/"default-dockercfg-kqt7n" Dec 11 11:37:10 crc kubenswrapper[5016]: I1211 11:37:10.331149 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5pbw\" (UniqueName: \"kubernetes.io/projected/9362a0e6-f65b-4b51-b6a6-d44da680cfa1-kube-api-access-s5pbw\") pod \"crc-debug-7dmlt\" (UID: \"9362a0e6-f65b-4b51-b6a6-d44da680cfa1\") " pod="openshift-must-gather-pq25m/crc-debug-7dmlt" Dec 11 11:37:10 crc kubenswrapper[5016]: I1211 11:37:10.331421 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9362a0e6-f65b-4b51-b6a6-d44da680cfa1-host\") pod \"crc-debug-7dmlt\" (UID: \"9362a0e6-f65b-4b51-b6a6-d44da680cfa1\") " pod="openshift-must-gather-pq25m/crc-debug-7dmlt" Dec 11 11:37:10 crc kubenswrapper[5016]: I1211 11:37:10.433422 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9362a0e6-f65b-4b51-b6a6-d44da680cfa1-host\") pod \"crc-debug-7dmlt\" (UID: \"9362a0e6-f65b-4b51-b6a6-d44da680cfa1\") " pod="openshift-must-gather-pq25m/crc-debug-7dmlt" Dec 11 11:37:10 crc kubenswrapper[5016]: I1211 11:37:10.433570 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9362a0e6-f65b-4b51-b6a6-d44da680cfa1-host\") pod \"crc-debug-7dmlt\" (UID: \"9362a0e6-f65b-4b51-b6a6-d44da680cfa1\") " pod="openshift-must-gather-pq25m/crc-debug-7dmlt" Dec 11 11:37:10 crc kubenswrapper[5016]: I1211 11:37:10.433581 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5pbw\" (UniqueName: \"kubernetes.io/projected/9362a0e6-f65b-4b51-b6a6-d44da680cfa1-kube-api-access-s5pbw\") pod \"crc-debug-7dmlt\" (UID: \"9362a0e6-f65b-4b51-b6a6-d44da680cfa1\") " pod="openshift-must-gather-pq25m/crc-debug-7dmlt" Dec 11 11:37:10 crc kubenswrapper[5016]: I1211 11:37:10.455848 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5pbw\" (UniqueName: \"kubernetes.io/projected/9362a0e6-f65b-4b51-b6a6-d44da680cfa1-kube-api-access-s5pbw\") pod \"crc-debug-7dmlt\" (UID: \"9362a0e6-f65b-4b51-b6a6-d44da680cfa1\") " pod="openshift-must-gather-pq25m/crc-debug-7dmlt" Dec 11 11:37:10 crc kubenswrapper[5016]: I1211 11:37:10.530012 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pq25m/crc-debug-7dmlt" Dec 11 11:37:10 crc kubenswrapper[5016]: I1211 11:37:10.647338 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 11:37:10 crc kubenswrapper[5016]: I1211 11:37:10.753406 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pq25m/crc-debug-7dmlt" event={"ID":"9362a0e6-f65b-4b51-b6a6-d44da680cfa1","Type":"ContainerStarted","Data":"d39cc663b8d3d939d85e013c257cbd5e27912ab0984e723f3201a4abc0e9beae"} Dec 11 11:37:23 crc kubenswrapper[5016]: I1211 11:37:23.889526 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pq25m/crc-debug-7dmlt" event={"ID":"9362a0e6-f65b-4b51-b6a6-d44da680cfa1","Type":"ContainerStarted","Data":"72f14fc6c0d16fe6532242cbc5e326a8f33cb3863bc3221ab74d07c1c6931703"} Dec 11 11:37:23 crc kubenswrapper[5016]: I1211 11:37:23.904869 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pq25m/crc-debug-7dmlt" podStartSLOduration=1.9150776330000001 podStartE2EDuration="13.904841311s" podCreationTimestamp="2025-12-11 11:37:10 +0000 UTC" firstStartedPulling="2025-12-11 11:37:10.647055486 +0000 UTC m=+3747.465615065" lastFinishedPulling="2025-12-11 11:37:22.636819174 +0000 UTC m=+3759.455378743" observedRunningTime="2025-12-11 11:37:23.902701559 +0000 UTC m=+3760.721261158" watchObservedRunningTime="2025-12-11 11:37:23.904841311 +0000 UTC m=+3760.723400900" Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.358320 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r6vfz"] Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.361887 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.374310 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r6vfz"] Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.444046 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-utilities\") pod \"redhat-operators-r6vfz\" (UID: \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\") " pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.444709 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-catalog-content\") pod \"redhat-operators-r6vfz\" (UID: \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\") " pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.444829 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8nz6\" (UniqueName: \"kubernetes.io/projected/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-kube-api-access-h8nz6\") pod \"redhat-operators-r6vfz\" (UID: \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\") " pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.547838 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-utilities\") pod \"redhat-operators-r6vfz\" (UID: \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\") " pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.548052 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-catalog-content\") pod \"redhat-operators-r6vfz\" (UID: \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\") " pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.548141 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8nz6\" (UniqueName: \"kubernetes.io/projected/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-kube-api-access-h8nz6\") pod \"redhat-operators-r6vfz\" (UID: \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\") " pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.548738 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-catalog-content\") pod \"redhat-operators-r6vfz\" (UID: \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\") " pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.548963 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-utilities\") pod \"redhat-operators-r6vfz\" (UID: \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\") " pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.573645 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-h8nz6\" (UniqueName: \"kubernetes.io/projected/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-kube-api-access-h8nz6\") pod \"redhat-operators-r6vfz\" (UID: \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\") " pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:02 crc kubenswrapper[5016]: I1211 11:38:02.697831 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:03 crc kubenswrapper[5016]: I1211 11:38:03.233902 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r6vfz"] Dec 11 11:38:03 crc kubenswrapper[5016]: I1211 11:38:03.276582 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6vfz" event={"ID":"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8","Type":"ContainerStarted","Data":"a0968f5fe0917d42e2e750a3a2a9968735dc41a51758c58b05064e5a9844cb76"} Dec 11 11:38:04 crc kubenswrapper[5016]: I1211 11:38:04.290831 5016 generic.go:334] "Generic (PLEG): container finished" podID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" containerID="fc5b4955321623838f42a25a06fee7c54cd6acd9a5b7822f6925fa25b45f99a6" exitCode=0 Dec 11 11:38:04 crc kubenswrapper[5016]: I1211 11:38:04.290892 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6vfz" event={"ID":"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8","Type":"ContainerDied","Data":"fc5b4955321623838f42a25a06fee7c54cd6acd9a5b7822f6925fa25b45f99a6"} Dec 11 11:38:05 crc kubenswrapper[5016]: I1211 11:38:05.303356 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6vfz" event={"ID":"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8","Type":"ContainerStarted","Data":"f74406c1609bceba39e3ecbcdedb83b93530e93bc1dbbe410f6dba8589b12fb5"} Dec 11 11:38:08 crc kubenswrapper[5016]: I1211 11:38:08.333872 5016 generic.go:334] "Generic (PLEG): container finished" podID="9362a0e6-f65b-4b51-b6a6-d44da680cfa1" containerID="72f14fc6c0d16fe6532242cbc5e326a8f33cb3863bc3221ab74d07c1c6931703" exitCode=0 Dec 11 11:38:08 crc kubenswrapper[5016]: I1211 11:38:08.333983 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pq25m/crc-debug-7dmlt" event={"ID":"9362a0e6-f65b-4b51-b6a6-d44da680cfa1","Type":"ContainerDied","Data":"72f14fc6c0d16fe6532242cbc5e326a8f33cb3863bc3221ab74d07c1c6931703"} Dec 11 11:38:08 crc kubenswrapper[5016]: I1211 11:38:08.338699 5016 generic.go:334] "Generic (PLEG): container finished" podID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" containerID="f74406c1609bceba39e3ecbcdedb83b93530e93bc1dbbe410f6dba8589b12fb5" exitCode=0 Dec 11 11:38:08 crc kubenswrapper[5016]: I1211 11:38:08.338760 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6vfz" event={"ID":"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8","Type":"ContainerDied","Data":"f74406c1609bceba39e3ecbcdedb83b93530e93bc1dbbe410f6dba8589b12fb5"} Dec 11 11:38:09 crc kubenswrapper[5016]: I1211 11:38:09.350665 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6vfz" event={"ID":"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8","Type":"ContainerStarted","Data":"863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023"} Dec 11 11:38:09 crc kubenswrapper[5016]: I1211 11:38:09.374330 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-r6vfz" podStartSLOduration=2.826496615 
podStartE2EDuration="7.374309183s" podCreationTimestamp="2025-12-11 11:38:02 +0000 UTC" firstStartedPulling="2025-12-11 11:38:04.292799229 +0000 UTC m=+3801.111358808" lastFinishedPulling="2025-12-11 11:38:08.840611787 +0000 UTC m=+3805.659171376" observedRunningTime="2025-12-11 11:38:09.368282585 +0000 UTC m=+3806.186842154" watchObservedRunningTime="2025-12-11 11:38:09.374309183 +0000 UTC m=+3806.192868762" Dec 11 11:38:09 crc kubenswrapper[5016]: I1211 11:38:09.453347 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pq25m/crc-debug-7dmlt" Dec 11 11:38:09 crc kubenswrapper[5016]: I1211 11:38:09.491661 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pq25m/crc-debug-7dmlt"] Dec 11 11:38:09 crc kubenswrapper[5016]: I1211 11:38:09.501926 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pq25m/crc-debug-7dmlt"] Dec 11 11:38:09 crc kubenswrapper[5016]: I1211 11:38:09.590895 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5pbw\" (UniqueName: \"kubernetes.io/projected/9362a0e6-f65b-4b51-b6a6-d44da680cfa1-kube-api-access-s5pbw\") pod \"9362a0e6-f65b-4b51-b6a6-d44da680cfa1\" (UID: \"9362a0e6-f65b-4b51-b6a6-d44da680cfa1\") " Dec 11 11:38:09 crc kubenswrapper[5016]: I1211 11:38:09.591029 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9362a0e6-f65b-4b51-b6a6-d44da680cfa1-host\") pod \"9362a0e6-f65b-4b51-b6a6-d44da680cfa1\" (UID: \"9362a0e6-f65b-4b51-b6a6-d44da680cfa1\") " Dec 11 11:38:09 crc kubenswrapper[5016]: I1211 11:38:09.591166 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9362a0e6-f65b-4b51-b6a6-d44da680cfa1-host" (OuterVolumeSpecName: "host") pod "9362a0e6-f65b-4b51-b6a6-d44da680cfa1" (UID: "9362a0e6-f65b-4b51-b6a6-d44da680cfa1"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 11:38:09 crc kubenswrapper[5016]: I1211 11:38:09.591694 5016 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9362a0e6-f65b-4b51-b6a6-d44da680cfa1-host\") on node \"crc\" DevicePath \"\"" Dec 11 11:38:09 crc kubenswrapper[5016]: I1211 11:38:09.598038 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9362a0e6-f65b-4b51-b6a6-d44da680cfa1-kube-api-access-s5pbw" (OuterVolumeSpecName: "kube-api-access-s5pbw") pod "9362a0e6-f65b-4b51-b6a6-d44da680cfa1" (UID: "9362a0e6-f65b-4b51-b6a6-d44da680cfa1"). InnerVolumeSpecName "kube-api-access-s5pbw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:38:09 crc kubenswrapper[5016]: I1211 11:38:09.693700 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5pbw\" (UniqueName: \"kubernetes.io/projected/9362a0e6-f65b-4b51-b6a6-d44da680cfa1-kube-api-access-s5pbw\") on node \"crc\" DevicePath \"\"" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.363759 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d39cc663b8d3d939d85e013c257cbd5e27912ab0984e723f3201a4abc0e9beae" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.363885 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pq25m/crc-debug-7dmlt" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.723699 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pq25m/crc-debug-4sgl6"] Dec 11 11:38:10 crc kubenswrapper[5016]: E1211 11:38:10.724317 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9362a0e6-f65b-4b51-b6a6-d44da680cfa1" containerName="container-00" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.724339 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="9362a0e6-f65b-4b51-b6a6-d44da680cfa1" containerName="container-00" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.724553 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="9362a0e6-f65b-4b51-b6a6-d44da680cfa1" containerName="container-00" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.725461 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pq25m/crc-debug-4sgl6" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.728473 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-pq25m"/"default-dockercfg-kqt7n" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.821480 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27592\" (UniqueName: \"kubernetes.io/projected/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759-kube-api-access-27592\") pod \"crc-debug-4sgl6\" (UID: \"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759\") " pod="openshift-must-gather-pq25m/crc-debug-4sgl6" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.822301 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759-host\") pod \"crc-debug-4sgl6\" (UID: \"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759\") " pod="openshift-must-gather-pq25m/crc-debug-4sgl6" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.924438 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27592\" (UniqueName: \"kubernetes.io/projected/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759-kube-api-access-27592\") pod \"crc-debug-4sgl6\" (UID: \"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759\") " pod="openshift-must-gather-pq25m/crc-debug-4sgl6" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.924626 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759-host\") pod \"crc-debug-4sgl6\" (UID: \"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759\") " pod="openshift-must-gather-pq25m/crc-debug-4sgl6" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.924750 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759-host\") pod \"crc-debug-4sgl6\" (UID: \"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759\") " pod="openshift-must-gather-pq25m/crc-debug-4sgl6" Dec 11 11:38:10 crc kubenswrapper[5016]: I1211 11:38:10.944294 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27592\" (UniqueName: \"kubernetes.io/projected/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759-kube-api-access-27592\") pod \"crc-debug-4sgl6\" (UID: \"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759\") " pod="openshift-must-gather-pq25m/crc-debug-4sgl6" Dec 11 11:38:11 crc kubenswrapper[5016]: I1211 
11:38:11.044556 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pq25m/crc-debug-4sgl6" Dec 11 11:38:11 crc kubenswrapper[5016]: I1211 11:38:11.374219 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pq25m/crc-debug-4sgl6" event={"ID":"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759","Type":"ContainerStarted","Data":"0ec9d53a6207b622ec90faaa2ecfa03d48e4c8f71d08923d71c95dbc3fc3875b"} Dec 11 11:38:11 crc kubenswrapper[5016]: I1211 11:38:11.374709 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pq25m/crc-debug-4sgl6" event={"ID":"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759","Type":"ContainerStarted","Data":"02e31859ff8182149dd522539b811b38c37f5f75435213927afee53a0d08b1ff"} Dec 11 11:38:11 crc kubenswrapper[5016]: I1211 11:38:11.406592 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pq25m/crc-debug-4sgl6" podStartSLOduration=1.4065516279999999 podStartE2EDuration="1.406551628s" podCreationTimestamp="2025-12-11 11:38:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 11:38:11.395235829 +0000 UTC m=+3808.213795428" watchObservedRunningTime="2025-12-11 11:38:11.406551628 +0000 UTC m=+3808.225111207" Dec 11 11:38:11 crc kubenswrapper[5016]: I1211 11:38:11.497984 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9362a0e6-f65b-4b51-b6a6-d44da680cfa1" path="/var/lib/kubelet/pods/9362a0e6-f65b-4b51-b6a6-d44da680cfa1/volumes" Dec 11 11:38:12 crc kubenswrapper[5016]: I1211 11:38:12.386592 5016 generic.go:334] "Generic (PLEG): container finished" podID="01d3a33d-7d89-4ca0-a219-3ae3d9f1d759" containerID="0ec9d53a6207b622ec90faaa2ecfa03d48e4c8f71d08923d71c95dbc3fc3875b" exitCode=0 Dec 11 11:38:12 crc kubenswrapper[5016]: I1211 11:38:12.386726 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pq25m/crc-debug-4sgl6" event={"ID":"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759","Type":"ContainerDied","Data":"0ec9d53a6207b622ec90faaa2ecfa03d48e4c8f71d08923d71c95dbc3fc3875b"} Dec 11 11:38:12 crc kubenswrapper[5016]: I1211 11:38:12.698006 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:12 crc kubenswrapper[5016]: I1211 11:38:12.698067 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:12 crc kubenswrapper[5016]: I1211 11:38:12.933096 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:38:12 crc kubenswrapper[5016]: I1211 11:38:12.933164 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:38:13 crc kubenswrapper[5016]: I1211 11:38:13.499361 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pq25m/crc-debug-4sgl6" Dec 11 11:38:13 crc kubenswrapper[5016]: I1211 11:38:13.535648 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pq25m/crc-debug-4sgl6"] Dec 11 11:38:13 crc kubenswrapper[5016]: I1211 11:38:13.546970 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pq25m/crc-debug-4sgl6"] Dec 11 11:38:13 crc kubenswrapper[5016]: I1211 11:38:13.576238 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759-host\") pod \"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759\" (UID: \"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759\") " Dec 11 11:38:13 crc kubenswrapper[5016]: I1211 11:38:13.576356 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759-host" (OuterVolumeSpecName: "host") pod "01d3a33d-7d89-4ca0-a219-3ae3d9f1d759" (UID: "01d3a33d-7d89-4ca0-a219-3ae3d9f1d759"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 11:38:13 crc kubenswrapper[5016]: I1211 11:38:13.576511 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27592\" (UniqueName: \"kubernetes.io/projected/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759-kube-api-access-27592\") pod \"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759\" (UID: \"01d3a33d-7d89-4ca0-a219-3ae3d9f1d759\") " Dec 11 11:38:13 crc kubenswrapper[5016]: I1211 11:38:13.577103 5016 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759-host\") on node \"crc\" DevicePath \"\"" Dec 11 11:38:13 crc kubenswrapper[5016]: I1211 11:38:13.583407 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759-kube-api-access-27592" (OuterVolumeSpecName: "kube-api-access-27592") pod "01d3a33d-7d89-4ca0-a219-3ae3d9f1d759" (UID: "01d3a33d-7d89-4ca0-a219-3ae3d9f1d759"). InnerVolumeSpecName "kube-api-access-27592". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:38:13 crc kubenswrapper[5016]: I1211 11:38:13.679733 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27592\" (UniqueName: \"kubernetes.io/projected/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759-kube-api-access-27592\") on node \"crc\" DevicePath \"\"" Dec 11 11:38:13 crc kubenswrapper[5016]: I1211 11:38:13.741258 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-r6vfz" podUID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" containerName="registry-server" probeResult="failure" output=< Dec 11 11:38:13 crc kubenswrapper[5016]: timeout: failed to connect service ":50051" within 1s Dec 11 11:38:13 crc kubenswrapper[5016]: > Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.407375 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02e31859ff8182149dd522539b811b38c37f5f75435213927afee53a0d08b1ff" Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.407430 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pq25m/crc-debug-4sgl6" Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.721886 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pq25m/crc-debug-whljf"] Dec 11 11:38:14 crc kubenswrapper[5016]: E1211 11:38:14.722865 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01d3a33d-7d89-4ca0-a219-3ae3d9f1d759" containerName="container-00" Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.722878 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="01d3a33d-7d89-4ca0-a219-3ae3d9f1d759" containerName="container-00" Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.723138 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="01d3a33d-7d89-4ca0-a219-3ae3d9f1d759" containerName="container-00" Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.723896 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pq25m/crc-debug-whljf" Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.726430 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-pq25m"/"default-dockercfg-kqt7n" Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.803419 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lcp7\" (UniqueName: \"kubernetes.io/projected/25fd3891-ef7f-4128-8f17-4e3ac41495a6-kube-api-access-4lcp7\") pod \"crc-debug-whljf\" (UID: \"25fd3891-ef7f-4128-8f17-4e3ac41495a6\") " pod="openshift-must-gather-pq25m/crc-debug-whljf" Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.803477 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/25fd3891-ef7f-4128-8f17-4e3ac41495a6-host\") pod \"crc-debug-whljf\" (UID: \"25fd3891-ef7f-4128-8f17-4e3ac41495a6\") " pod="openshift-must-gather-pq25m/crc-debug-whljf" Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.906297 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lcp7\" (UniqueName: \"kubernetes.io/projected/25fd3891-ef7f-4128-8f17-4e3ac41495a6-kube-api-access-4lcp7\") pod \"crc-debug-whljf\" (UID: \"25fd3891-ef7f-4128-8f17-4e3ac41495a6\") " pod="openshift-must-gather-pq25m/crc-debug-whljf" Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.906359 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/25fd3891-ef7f-4128-8f17-4e3ac41495a6-host\") pod \"crc-debug-whljf\" (UID: \"25fd3891-ef7f-4128-8f17-4e3ac41495a6\") " pod="openshift-must-gather-pq25m/crc-debug-whljf" Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.906465 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/25fd3891-ef7f-4128-8f17-4e3ac41495a6-host\") pod \"crc-debug-whljf\" (UID: \"25fd3891-ef7f-4128-8f17-4e3ac41495a6\") " pod="openshift-must-gather-pq25m/crc-debug-whljf" Dec 11 11:38:14 crc kubenswrapper[5016]: I1211 11:38:14.932423 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lcp7\" (UniqueName: \"kubernetes.io/projected/25fd3891-ef7f-4128-8f17-4e3ac41495a6-kube-api-access-4lcp7\") pod \"crc-debug-whljf\" (UID: \"25fd3891-ef7f-4128-8f17-4e3ac41495a6\") " pod="openshift-must-gather-pq25m/crc-debug-whljf" Dec 11 11:38:15 crc kubenswrapper[5016]: I1211 
11:38:15.043876 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pq25m/crc-debug-whljf" Dec 11 11:38:15 crc kubenswrapper[5016]: W1211 11:38:15.104697 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25fd3891_ef7f_4128_8f17_4e3ac41495a6.slice/crio-a94b2fdd4d88b3891a2879008efb3d7b6fef0cf46a00d7c1849942cdee851340 WatchSource:0}: Error finding container a94b2fdd4d88b3891a2879008efb3d7b6fef0cf46a00d7c1849942cdee851340: Status 404 returned error can't find the container with id a94b2fdd4d88b3891a2879008efb3d7b6fef0cf46a00d7c1849942cdee851340 Dec 11 11:38:15 crc kubenswrapper[5016]: I1211 11:38:15.420634 5016 generic.go:334] "Generic (PLEG): container finished" podID="25fd3891-ef7f-4128-8f17-4e3ac41495a6" containerID="2ae03d4b556f6469d86694bf9124fc366d661bc630fa22ba3058509a2a9b3e92" exitCode=0 Dec 11 11:38:15 crc kubenswrapper[5016]: I1211 11:38:15.420721 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pq25m/crc-debug-whljf" event={"ID":"25fd3891-ef7f-4128-8f17-4e3ac41495a6","Type":"ContainerDied","Data":"2ae03d4b556f6469d86694bf9124fc366d661bc630fa22ba3058509a2a9b3e92"} Dec 11 11:38:15 crc kubenswrapper[5016]: I1211 11:38:15.421074 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pq25m/crc-debug-whljf" event={"ID":"25fd3891-ef7f-4128-8f17-4e3ac41495a6","Type":"ContainerStarted","Data":"a94b2fdd4d88b3891a2879008efb3d7b6fef0cf46a00d7c1849942cdee851340"} Dec 11 11:38:15 crc kubenswrapper[5016]: I1211 11:38:15.488236 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01d3a33d-7d89-4ca0-a219-3ae3d9f1d759" path="/var/lib/kubelet/pods/01d3a33d-7d89-4ca0-a219-3ae3d9f1d759/volumes" Dec 11 11:38:15 crc kubenswrapper[5016]: I1211 11:38:15.488922 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pq25m/crc-debug-whljf"] Dec 11 11:38:15 crc kubenswrapper[5016]: I1211 11:38:15.492205 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pq25m/crc-debug-whljf"] Dec 11 11:38:16 crc kubenswrapper[5016]: I1211 11:38:16.563447 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pq25m/crc-debug-whljf" Dec 11 11:38:16 crc kubenswrapper[5016]: I1211 11:38:16.640341 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/25fd3891-ef7f-4128-8f17-4e3ac41495a6-host\") pod \"25fd3891-ef7f-4128-8f17-4e3ac41495a6\" (UID: \"25fd3891-ef7f-4128-8f17-4e3ac41495a6\") " Dec 11 11:38:16 crc kubenswrapper[5016]: I1211 11:38:16.640546 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4lcp7\" (UniqueName: \"kubernetes.io/projected/25fd3891-ef7f-4128-8f17-4e3ac41495a6-kube-api-access-4lcp7\") pod \"25fd3891-ef7f-4128-8f17-4e3ac41495a6\" (UID: \"25fd3891-ef7f-4128-8f17-4e3ac41495a6\") " Dec 11 11:38:16 crc kubenswrapper[5016]: I1211 11:38:16.641198 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25fd3891-ef7f-4128-8f17-4e3ac41495a6-host" (OuterVolumeSpecName: "host") pod "25fd3891-ef7f-4128-8f17-4e3ac41495a6" (UID: "25fd3891-ef7f-4128-8f17-4e3ac41495a6"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 11 11:38:16 crc kubenswrapper[5016]: I1211 11:38:16.641855 5016 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/25fd3891-ef7f-4128-8f17-4e3ac41495a6-host\") on node \"crc\" DevicePath \"\"" Dec 11 11:38:16 crc kubenswrapper[5016]: I1211 11:38:16.652244 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25fd3891-ef7f-4128-8f17-4e3ac41495a6-kube-api-access-4lcp7" (OuterVolumeSpecName: "kube-api-access-4lcp7") pod "25fd3891-ef7f-4128-8f17-4e3ac41495a6" (UID: "25fd3891-ef7f-4128-8f17-4e3ac41495a6"). InnerVolumeSpecName "kube-api-access-4lcp7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:38:16 crc kubenswrapper[5016]: I1211 11:38:16.743254 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4lcp7\" (UniqueName: \"kubernetes.io/projected/25fd3891-ef7f-4128-8f17-4e3ac41495a6-kube-api-access-4lcp7\") on node \"crc\" DevicePath \"\"" Dec 11 11:38:17 crc kubenswrapper[5016]: I1211 11:38:17.449558 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a94b2fdd4d88b3891a2879008efb3d7b6fef0cf46a00d7c1849942cdee851340" Dec 11 11:38:17 crc kubenswrapper[5016]: I1211 11:38:17.449640 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pq25m/crc-debug-whljf" Dec 11 11:38:17 crc kubenswrapper[5016]: I1211 11:38:17.484828 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25fd3891-ef7f-4128-8f17-4e3ac41495a6" path="/var/lib/kubelet/pods/25fd3891-ef7f-4128-8f17-4e3ac41495a6/volumes" Dec 11 11:38:22 crc kubenswrapper[5016]: I1211 11:38:22.747791 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:22 crc kubenswrapper[5016]: I1211 11:38:22.807799 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:22 crc kubenswrapper[5016]: I1211 11:38:22.992350 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r6vfz"] Dec 11 11:38:24 crc kubenswrapper[5016]: I1211 11:38:24.544973 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r6vfz" podUID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" containerName="registry-server" containerID="cri-o://863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023" gracePeriod=2 Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.050072 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.120362 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8nz6\" (UniqueName: \"kubernetes.io/projected/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-kube-api-access-h8nz6\") pod \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\" (UID: \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\") " Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.120473 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-catalog-content\") pod \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\" (UID: \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\") " Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.120782 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-utilities\") pod \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\" (UID: \"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8\") " Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.122038 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-utilities" (OuterVolumeSpecName: "utilities") pod "0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" (UID: "0d80b14a-2be2-41bd-bc94-c1027fbeb5d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.158166 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-kube-api-access-h8nz6" (OuterVolumeSpecName: "kube-api-access-h8nz6") pod "0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" (UID: "0d80b14a-2be2-41bd-bc94-c1027fbeb5d8"). InnerVolumeSpecName "kube-api-access-h8nz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.223866 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8nz6\" (UniqueName: \"kubernetes.io/projected/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-kube-api-access-h8nz6\") on node \"crc\" DevicePath \"\"" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.223910 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.283365 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" (UID: "0d80b14a-2be2-41bd-bc94-c1027fbeb5d8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.326224 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.555644 5016 generic.go:334] "Generic (PLEG): container finished" podID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" containerID="863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023" exitCode=0 Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.555686 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6vfz" event={"ID":"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8","Type":"ContainerDied","Data":"863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023"} Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.555705 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r6vfz" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.555723 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6vfz" event={"ID":"0d80b14a-2be2-41bd-bc94-c1027fbeb5d8","Type":"ContainerDied","Data":"a0968f5fe0917d42e2e750a3a2a9968735dc41a51758c58b05064e5a9844cb76"} Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.555743 5016 scope.go:117] "RemoveContainer" containerID="863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.603148 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r6vfz"] Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.608769 5016 scope.go:117] "RemoveContainer" containerID="f74406c1609bceba39e3ecbcdedb83b93530e93bc1dbbe410f6dba8589b12fb5" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.643324 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r6vfz"] Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.646182 5016 scope.go:117] "RemoveContainer" containerID="fc5b4955321623838f42a25a06fee7c54cd6acd9a5b7822f6925fa25b45f99a6" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.684372 5016 scope.go:117] "RemoveContainer" containerID="863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023" Dec 11 11:38:25 crc kubenswrapper[5016]: E1211 11:38:25.684962 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023\": container with ID starting with 863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023 not found: ID does not exist" containerID="863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.685068 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023"} err="failed to get container status \"863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023\": rpc error: code = NotFound desc = could not find container \"863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023\": container with ID starting with 863782e98932c892decf76df5d535fd8d818e8258cf987722deb87804c14a023 not found: ID does not exist" Dec 11 11:38:25 crc 
kubenswrapper[5016]: I1211 11:38:25.685128 5016 scope.go:117] "RemoveContainer" containerID="f74406c1609bceba39e3ecbcdedb83b93530e93bc1dbbe410f6dba8589b12fb5" Dec 11 11:38:25 crc kubenswrapper[5016]: E1211 11:38:25.686473 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f74406c1609bceba39e3ecbcdedb83b93530e93bc1dbbe410f6dba8589b12fb5\": container with ID starting with f74406c1609bceba39e3ecbcdedb83b93530e93bc1dbbe410f6dba8589b12fb5 not found: ID does not exist" containerID="f74406c1609bceba39e3ecbcdedb83b93530e93bc1dbbe410f6dba8589b12fb5" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.686499 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f74406c1609bceba39e3ecbcdedb83b93530e93bc1dbbe410f6dba8589b12fb5"} err="failed to get container status \"f74406c1609bceba39e3ecbcdedb83b93530e93bc1dbbe410f6dba8589b12fb5\": rpc error: code = NotFound desc = could not find container \"f74406c1609bceba39e3ecbcdedb83b93530e93bc1dbbe410f6dba8589b12fb5\": container with ID starting with f74406c1609bceba39e3ecbcdedb83b93530e93bc1dbbe410f6dba8589b12fb5 not found: ID does not exist" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.686513 5016 scope.go:117] "RemoveContainer" containerID="fc5b4955321623838f42a25a06fee7c54cd6acd9a5b7822f6925fa25b45f99a6" Dec 11 11:38:25 crc kubenswrapper[5016]: E1211 11:38:25.686929 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc5b4955321623838f42a25a06fee7c54cd6acd9a5b7822f6925fa25b45f99a6\": container with ID starting with fc5b4955321623838f42a25a06fee7c54cd6acd9a5b7822f6925fa25b45f99a6 not found: ID does not exist" containerID="fc5b4955321623838f42a25a06fee7c54cd6acd9a5b7822f6925fa25b45f99a6" Dec 11 11:38:25 crc kubenswrapper[5016]: I1211 11:38:25.686967 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc5b4955321623838f42a25a06fee7c54cd6acd9a5b7822f6925fa25b45f99a6"} err="failed to get container status \"fc5b4955321623838f42a25a06fee7c54cd6acd9a5b7822f6925fa25b45f99a6\": rpc error: code = NotFound desc = could not find container \"fc5b4955321623838f42a25a06fee7c54cd6acd9a5b7822f6925fa25b45f99a6\": container with ID starting with fc5b4955321623838f42a25a06fee7c54cd6acd9a5b7822f6925fa25b45f99a6 not found: ID does not exist" Dec 11 11:38:27 crc kubenswrapper[5016]: I1211 11:38:27.490037 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" path="/var/lib/kubelet/pods/0d80b14a-2be2-41bd-bc94-c1027fbeb5d8/volumes" Dec 11 11:38:31 crc kubenswrapper[5016]: I1211 11:38:31.573463 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-75d7945896-vvw5x_42eab2b8-1142-4d4f-bb8a-58736349fd7e/barbican-api/0.log" Dec 11 11:38:31 crc kubenswrapper[5016]: I1211 11:38:31.810506 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-75d7945896-vvw5x_42eab2b8-1142-4d4f-bb8a-58736349fd7e/barbican-api-log/0.log" Dec 11 11:38:31 crc kubenswrapper[5016]: I1211 11:38:31.903318 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-559df4c4fd-rpdct_2ad5059d-bfd5-4ea8-8d6a-898cd592e49d/barbican-keystone-listener-log/0.log" Dec 11 11:38:31 crc kubenswrapper[5016]: I1211 11:38:31.906335 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_barbican-keystone-listener-559df4c4fd-rpdct_2ad5059d-bfd5-4ea8-8d6a-898cd592e49d/barbican-keystone-listener/0.log" Dec 11 11:38:32 crc kubenswrapper[5016]: I1211 11:38:32.116990 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5f9f9b6559-f78rz_266c2ca6-fea6-4f3d-8796-bd0db83f2bf0/barbican-worker/0.log" Dec 11 11:38:32 crc kubenswrapper[5016]: I1211 11:38:32.188494 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5f9f9b6559-f78rz_266c2ca6-fea6-4f3d-8796-bd0db83f2bf0/barbican-worker-log/0.log" Dec 11 11:38:32 crc kubenswrapper[5016]: I1211 11:38:32.364071 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b_10792fe7-d5d5-4918-8658-20331647f302/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:32 crc kubenswrapper[5016]: I1211 11:38:32.427794 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_fd3155c0-9091-4e5e-888d-67b0256b0b51/ceilometer-central-agent/0.log" Dec 11 11:38:32 crc kubenswrapper[5016]: I1211 11:38:32.477875 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_fd3155c0-9091-4e5e-888d-67b0256b0b51/ceilometer-notification-agent/0.log" Dec 11 11:38:32 crc kubenswrapper[5016]: I1211 11:38:32.613621 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_fd3155c0-9091-4e5e-888d-67b0256b0b51/proxy-httpd/0.log" Dec 11 11:38:32 crc kubenswrapper[5016]: I1211 11:38:32.645754 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_fd3155c0-9091-4e5e-888d-67b0256b0b51/sg-core/0.log" Dec 11 11:38:32 crc kubenswrapper[5016]: I1211 11:38:32.739005 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_40789e09-e7ca-4ce3-8939-9ab2605e257f/cinder-api/0.log" Dec 11 11:38:32 crc kubenswrapper[5016]: I1211 11:38:32.943553 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_40789e09-e7ca-4ce3-8939-9ab2605e257f/cinder-api-log/0.log" Dec 11 11:38:32 crc kubenswrapper[5016]: I1211 11:38:32.959895 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_0f265b9d-c475-455f-9fe7-05070efd4ec1/cinder-scheduler/0.log" Dec 11 11:38:33 crc kubenswrapper[5016]: I1211 11:38:33.023678 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_0f265b9d-c475-455f-9fe7-05070efd4ec1/probe/0.log" Dec 11 11:38:33 crc kubenswrapper[5016]: I1211 11:38:33.206473 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh_6b68b5b9-fe7e-4340-8541-71c6f8b80f3f/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:33 crc kubenswrapper[5016]: I1211 11:38:33.306611 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-rckrh_baa674c6-426d-428e-af4a-dbff72b93714/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:33 crc kubenswrapper[5016]: I1211 11:38:33.528993 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-8nsnc_18f4fb70-2aaa-471a-9556-b0977ad6ec55/init/0.log" Dec 11 11:38:33 crc kubenswrapper[5016]: I1211 11:38:33.653994 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-8nsnc_18f4fb70-2aaa-471a-9556-b0977ad6ec55/init/0.log" Dec 11 11:38:33 crc kubenswrapper[5016]: I1211 11:38:33.716820 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-8nsnc_18f4fb70-2aaa-471a-9556-b0977ad6ec55/dnsmasq-dns/0.log" Dec 11 11:38:33 crc kubenswrapper[5016]: I1211 11:38:33.810956 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv_43a2a77d-f6c4-40ba-8258-ee6bced589f2/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:33 crc kubenswrapper[5016]: I1211 11:38:33.933794 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_67cb2370-3bd3-4105-9369-3b99535ed13f/glance-httpd/0.log" Dec 11 11:38:34 crc kubenswrapper[5016]: I1211 11:38:34.034393 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_67cb2370-3bd3-4105-9369-3b99535ed13f/glance-log/0.log" Dec 11 11:38:34 crc kubenswrapper[5016]: I1211 11:38:34.189999 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_3243e41d-6485-4353-993a-11f309322b5f/glance-log/0.log" Dec 11 11:38:34 crc kubenswrapper[5016]: I1211 11:38:34.202603 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_3243e41d-6485-4353-993a-11f309322b5f/glance-httpd/0.log" Dec 11 11:38:34 crc kubenswrapper[5016]: I1211 11:38:34.451629 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7df5fc4844-wdnrz_02741cc6-3a2a-48c1-b492-57762e0d75e6/horizon/0.log" Dec 11 11:38:34 crc kubenswrapper[5016]: I1211 11:38:34.569083 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg_a31cb907-f20d-44a6-abc0-53951fe5e793/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:34 crc kubenswrapper[5016]: I1211 11:38:34.842449 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7df5fc4844-wdnrz_02741cc6-3a2a-48c1-b492-57762e0d75e6/horizon-log/0.log" Dec 11 11:38:34 crc kubenswrapper[5016]: I1211 11:38:34.873524 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-8rmhc_b4760482-fee8-4399-bae9-a30831f41536/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:35 crc kubenswrapper[5016]: I1211 11:38:35.154040 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29424181-nmfs5_59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2/keystone-cron/0.log" Dec 11 11:38:35 crc kubenswrapper[5016]: I1211 11:38:35.209155 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-589444b9f8-c7wwh_207fc475-2260-4b2f-86a9-c4c0bedf3ce1/keystone-api/0.log" Dec 11 11:38:35 crc kubenswrapper[5016]: I1211 11:38:35.381431 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_1f3e95ec-e5d3-44ab-ae44-1279b0a04e75/kube-state-metrics/0.log" Dec 11 11:38:35 crc kubenswrapper[5016]: I1211 11:38:35.484893 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd_ff52a65c-c0b6-4d71-8038-b8c079cd1d64/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:35 crc kubenswrapper[5016]: I1211 11:38:35.867572 5016 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_neutron-5d4bc555dc-hjmj8_a420329b-5657-402b-8b2c-c6f53beda0d6/neutron-api/0.log" Dec 11 11:38:36 crc kubenswrapper[5016]: I1211 11:38:36.013162 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5d4bc555dc-hjmj8_a420329b-5657-402b-8b2c-c6f53beda0d6/neutron-httpd/0.log" Dec 11 11:38:36 crc kubenswrapper[5016]: I1211 11:38:36.174758 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2_24b4bd76-ba99-43ad-91e9-4fdf518a6935/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:36 crc kubenswrapper[5016]: I1211 11:38:36.768139 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_dfdff31d-6c59-4f13-ba0c-e5791bd7fedd/nova-cell0-conductor-conductor/0.log" Dec 11 11:38:36 crc kubenswrapper[5016]: I1211 11:38:36.800157 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_e23cc12c-b028-49ec-ba40-adb9ad2baf59/nova-api-log/0.log" Dec 11 11:38:36 crc kubenswrapper[5016]: I1211 11:38:36.980308 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_e23cc12c-b028-49ec-ba40-adb9ad2baf59/nova-api-api/0.log" Dec 11 11:38:37 crc kubenswrapper[5016]: I1211 11:38:37.105998 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d/nova-cell1-novncproxy-novncproxy/0.log" Dec 11 11:38:37 crc kubenswrapper[5016]: I1211 11:38:37.116405 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_9b21ed74-421c-4bbc-b17e-317beee96ae7/nova-cell1-conductor-conductor/0.log" Dec 11 11:38:37 crc kubenswrapper[5016]: I1211 11:38:37.398871 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-kwsxl_2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6/nova-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:37 crc kubenswrapper[5016]: I1211 11:38:37.475149 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_b74b056a-931e-4c8f-809d-025693ae2e9c/nova-metadata-log/0.log" Dec 11 11:38:37 crc kubenswrapper[5016]: I1211 11:38:37.830819 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_cd7590f4-6fdc-450e-8a96-e4ca6315d644/nova-scheduler-scheduler/0.log" Dec 11 11:38:37 crc kubenswrapper[5016]: I1211 11:38:37.990062 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_be590587-03d9-4391-98b3-bacb7432ec51/mysql-bootstrap/0.log" Dec 11 11:38:38 crc kubenswrapper[5016]: I1211 11:38:38.233027 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_be590587-03d9-4391-98b3-bacb7432ec51/mysql-bootstrap/0.log" Dec 11 11:38:38 crc kubenswrapper[5016]: I1211 11:38:38.293078 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_be590587-03d9-4391-98b3-bacb7432ec51/galera/0.log" Dec 11 11:38:38 crc kubenswrapper[5016]: I1211 11:38:38.509498 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd/mysql-bootstrap/0.log" Dec 11 11:38:38 crc kubenswrapper[5016]: I1211 11:38:38.734815 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd/mysql-bootstrap/0.log" Dec 11 
11:38:38 crc kubenswrapper[5016]: I1211 11:38:38.781787 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_b74b056a-931e-4c8f-809d-025693ae2e9c/nova-metadata-metadata/0.log" Dec 11 11:38:38 crc kubenswrapper[5016]: I1211 11:38:38.809076 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd/galera/0.log" Dec 11 11:38:38 crc kubenswrapper[5016]: I1211 11:38:38.935684 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_5ab24d20-cfe0-4aeb-a0df-e0d0b245e863/openstackclient/0.log" Dec 11 11:38:39 crc kubenswrapper[5016]: I1211 11:38:39.027347 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-g76kk_cf0694a8-c7ff-429f-a52f-5885a8dcb3ac/ovn-controller/0.log" Dec 11 11:38:39 crc kubenswrapper[5016]: I1211 11:38:39.241289 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-rg5mz_d49703c4-2744-4669-baae-fc1ee5932f5d/openstack-network-exporter/0.log" Dec 11 11:38:39 crc kubenswrapper[5016]: I1211 11:38:39.343397 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vbtwd_4f718adb-56cf-4983-bd6a-e750e06edad7/ovsdb-server-init/0.log" Dec 11 11:38:39 crc kubenswrapper[5016]: I1211 11:38:39.544620 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vbtwd_4f718adb-56cf-4983-bd6a-e750e06edad7/ovs-vswitchd/0.log" Dec 11 11:38:39 crc kubenswrapper[5016]: I1211 11:38:39.584469 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vbtwd_4f718adb-56cf-4983-bd6a-e750e06edad7/ovsdb-server-init/0.log" Dec 11 11:38:39 crc kubenswrapper[5016]: I1211 11:38:39.603491 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vbtwd_4f718adb-56cf-4983-bd6a-e750e06edad7/ovsdb-server/0.log" Dec 11 11:38:39 crc kubenswrapper[5016]: I1211 11:38:39.799855 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-b97tz_68a2fe3a-3815-4605-b685-2ffe583f46d4/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:39 crc kubenswrapper[5016]: I1211 11:38:39.835883 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_817d89c2-920a-49a9-b87d-308f48847b2f/openstack-network-exporter/0.log" Dec 11 11:38:39 crc kubenswrapper[5016]: I1211 11:38:39.899002 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_817d89c2-920a-49a9-b87d-308f48847b2f/ovn-northd/0.log" Dec 11 11:38:40 crc kubenswrapper[5016]: I1211 11:38:40.077245 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_65ac3f0e-4016-4586-b742-2c52252ed51b/ovsdbserver-nb/0.log" Dec 11 11:38:40 crc kubenswrapper[5016]: I1211 11:38:40.084814 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_65ac3f0e-4016-4586-b742-2c52252ed51b/openstack-network-exporter/0.log" Dec 11 11:38:40 crc kubenswrapper[5016]: I1211 11:38:40.309106 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_de7b514e-0bc7-4260-9bc4-9c0f1b13562b/openstack-network-exporter/0.log" Dec 11 11:38:40 crc kubenswrapper[5016]: I1211 11:38:40.343390 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_de7b514e-0bc7-4260-9bc4-9c0f1b13562b/ovsdbserver-sb/0.log" Dec 11 
11:38:40 crc kubenswrapper[5016]: I1211 11:38:40.502618 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-676fd6784-tg4g7_668906e8-a695-43ee-aca4-5b1bd13053eb/placement-api/0.log" Dec 11 11:38:40 crc kubenswrapper[5016]: I1211 11:38:40.678240 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-676fd6784-tg4g7_668906e8-a695-43ee-aca4-5b1bd13053eb/placement-log/0.log" Dec 11 11:38:40 crc kubenswrapper[5016]: I1211 11:38:40.796878 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_24d5919d-ee3d-4023-9a6b-bc1d9838b2ce/setup-container/0.log" Dec 11 11:38:40 crc kubenswrapper[5016]: I1211 11:38:40.937828 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_24d5919d-ee3d-4023-9a6b-bc1d9838b2ce/setup-container/0.log" Dec 11 11:38:41 crc kubenswrapper[5016]: I1211 11:38:41.014151 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_24d5919d-ee3d-4023-9a6b-bc1d9838b2ce/rabbitmq/0.log" Dec 11 11:38:41 crc kubenswrapper[5016]: I1211 11:38:41.081786 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_86d18250-4387-46f7-af2c-2ce21bf43e12/setup-container/0.log" Dec 11 11:38:41 crc kubenswrapper[5016]: I1211 11:38:41.309512 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_86d18250-4387-46f7-af2c-2ce21bf43e12/setup-container/0.log" Dec 11 11:38:41 crc kubenswrapper[5016]: I1211 11:38:41.322432 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_86d18250-4387-46f7-af2c-2ce21bf43e12/rabbitmq/0.log" Dec 11 11:38:41 crc kubenswrapper[5016]: I1211 11:38:41.422577 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8_1965e849-9439-404f-96f1-d5ced3154038/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:41 crc kubenswrapper[5016]: I1211 11:38:41.553055 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-26jgb_e240bd3c-2bc0-4e00-b092-51ab30da277d/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:41 crc kubenswrapper[5016]: I1211 11:38:41.664365 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl_b8f46431-27eb-4bb3-952a-3dd405e15121/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:41 crc kubenswrapper[5016]: I1211 11:38:41.869047 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-94jbb_f43b972e-9584-45ea-a540-cc2facfb7ec5/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:41 crc kubenswrapper[5016]: I1211 11:38:41.934642 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-lz69w_79db9d17-e0eb-40f5-88ca-5f222544e2b1/ssh-known-hosts-edpm-deployment/0.log" Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.213830 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-58f98f7fd9-rtbw4_2816d686-f2da-4306-9b07-b27dc9eb88f5/proxy-server/0.log" Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.276957 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-58f98f7fd9-rtbw4_2816d686-f2da-4306-9b07-b27dc9eb88f5/proxy-httpd/0.log" Dec 11 11:38:42 crc 
kubenswrapper[5016]: I1211 11:38:42.338549 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-cfbpr_8d68a71e-cbcb-4ce9-bb01-3b48154074a4/swift-ring-rebalance/0.log" Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.456288 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/account-auditor/0.log" Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.545852 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/account-reaper/0.log" Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.611268 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/account-replicator/0.log" Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.692429 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/container-auditor/0.log" Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.735154 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/account-server/0.log" Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.795373 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/container-replicator/0.log" Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.877972 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/container-server/0.log" Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.932614 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.932972 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.942297 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/container-updater/0.log" Dec 11 11:38:42 crc kubenswrapper[5016]: I1211 11:38:42.990440 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/object-auditor/0.log" Dec 11 11:38:43 crc kubenswrapper[5016]: I1211 11:38:43.093634 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/object-expirer/0.log" Dec 11 11:38:43 crc kubenswrapper[5016]: I1211 11:38:43.192212 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/object-replicator/0.log" Dec 11 11:38:43 crc kubenswrapper[5016]: I1211 11:38:43.238887 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/object-server/0.log" Dec 11 11:38:43 crc 
kubenswrapper[5016]: I1211 11:38:43.272415 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/object-updater/0.log" Dec 11 11:38:43 crc kubenswrapper[5016]: I1211 11:38:43.358448 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/rsync/0.log" Dec 11 11:38:43 crc kubenswrapper[5016]: I1211 11:38:43.453579 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/swift-recon-cron/0.log" Dec 11 11:38:43 crc kubenswrapper[5016]: I1211 11:38:43.575345 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr_604b9ba2-ab41-4901-a9ef-9eb82bee5e4a/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:43 crc kubenswrapper[5016]: I1211 11:38:43.849661 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_d9613e90-5366-4f68-80dd-f66a7541a670/tempest-tests-tempest-tests-runner/0.log" Dec 11 11:38:44 crc kubenswrapper[5016]: I1211 11:38:44.002341 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_51a74a7d-9731-443f-b85f-99645084064a/test-operator-logs-container/0.log" Dec 11 11:38:44 crc kubenswrapper[5016]: I1211 11:38:44.114443 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql_53387d31-b49f-4100-9772-a4f7d6898471/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:38:52 crc kubenswrapper[5016]: I1211 11:38:52.920211 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_f0588b90-e0f3-49e1-9ff9-76e8aac23b93/memcached/0.log" Dec 11 11:39:12 crc kubenswrapper[5016]: I1211 11:39:12.910587 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/util/0.log" Dec 11 11:39:12 crc kubenswrapper[5016]: I1211 11:39:12.932761 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:39:12 crc kubenswrapper[5016]: I1211 11:39:12.932852 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:39:12 crc kubenswrapper[5016]: I1211 11:39:12.932922 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 11:39:12 crc kubenswrapper[5016]: I1211 11:39:12.934242 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 11:39:12 crc 
kubenswrapper[5016]: I1211 11:39:12.934348 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03" gracePeriod=600 Dec 11 11:39:13 crc kubenswrapper[5016]: E1211 11:39:13.058362 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.082091 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03" exitCode=0 Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.082155 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"} Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.082374 5016 scope.go:117] "RemoveContainer" containerID="bb4758d65cf21f849ad86ea304787576e73defababc5c9ad45257a22b121f4c9" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.085357 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03" Dec 11 11:39:13 crc kubenswrapper[5016]: E1211 11:39:13.085612 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:39:13 crc kubenswrapper[5016]: E1211 11:39:13.108175 5016 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode679c083_2480_4bc8_a8ea_dc2ff0412508.slice/crio-conmon-4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode679c083_2480_4bc8_a8ea_dc2ff0412508.slice/crio-4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03.scope\": RecentStats: unable to find data in memory cache]" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.192043 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/pull/0.log" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.193464 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/util/0.log" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.232263 5016 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/pull/0.log" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.461422 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/util/0.log" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.466248 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/extract/0.log" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.486673 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/pull/0.log" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.694416 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-qtvrv_cc7c5322-f255-4c02-b684-d1bccf74eb1a/manager/0.log" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.700514 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-qtvrv_cc7c5322-f255-4c02-b684-d1bccf74eb1a/kube-rbac-proxy/0.log" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.717301 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-rzjg8_de234c3f-f96c-444d-a7f5-a453df14d2e4/kube-rbac-proxy/0.log" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.910830 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-4hwxf_251e6e53-bbba-4d67-a361-44c471db70ff/manager/0.log" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.941716 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-rzjg8_de234c3f-f96c-444d-a7f5-a453df14d2e4/manager/0.log" Dec 11 11:39:13 crc kubenswrapper[5016]: I1211 11:39:13.955408 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-4hwxf_251e6e53-bbba-4d67-a361-44c471db70ff/kube-rbac-proxy/0.log" Dec 11 11:39:14 crc kubenswrapper[5016]: I1211 11:39:14.152171 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-v8hsp_b1812840-a032-4c7a-a851-505f89b19063/kube-rbac-proxy/0.log" Dec 11 11:39:14 crc kubenswrapper[5016]: I1211 11:39:14.265113 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-v8hsp_b1812840-a032-4c7a-a851-505f89b19063/manager/0.log" Dec 11 11:39:14 crc kubenswrapper[5016]: I1211 11:39:14.302359 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-jpkdc_2f35a405-1590-4bd7-9f64-f897bac8e8e7/kube-rbac-proxy/0.log" Dec 11 11:39:14 crc kubenswrapper[5016]: I1211 11:39:14.361579 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-jpkdc_2f35a405-1590-4bd7-9f64-f897bac8e8e7/manager/0.log" Dec 11 11:39:14 crc 
kubenswrapper[5016]: I1211 11:39:14.435970 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-tcntz_daa29314-dcea-4026-9a51-7f9ceaed9052/kube-rbac-proxy/0.log" Dec 11 11:39:14 crc kubenswrapper[5016]: I1211 11:39:14.512647 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-tcntz_daa29314-dcea-4026-9a51-7f9ceaed9052/manager/0.log" Dec 11 11:39:14 crc kubenswrapper[5016]: I1211 11:39:14.630326 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-9s5rq_b44a8ea9-ba71-486d-9672-44146f09acb1/kube-rbac-proxy/0.log" Dec 11 11:39:14 crc kubenswrapper[5016]: I1211 11:39:14.864432 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-48q65_ac95cdf1-ed70-4d47-8b28-3f7f5e68804b/kube-rbac-proxy/0.log" Dec 11 11:39:14 crc kubenswrapper[5016]: I1211 11:39:14.878653 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-48q65_ac95cdf1-ed70-4d47-8b28-3f7f5e68804b/manager/0.log" Dec 11 11:39:14 crc kubenswrapper[5016]: I1211 11:39:14.913555 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-9s5rq_b44a8ea9-ba71-486d-9672-44146f09acb1/manager/0.log" Dec 11 11:39:15 crc kubenswrapper[5016]: I1211 11:39:15.114837 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-lx6dp_b2cd783c-ef38-4478-9f86-60374f554bb2/kube-rbac-proxy/0.log" Dec 11 11:39:15 crc kubenswrapper[5016]: I1211 11:39:15.135837 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-lx6dp_b2cd783c-ef38-4478-9f86-60374f554bb2/manager/0.log" Dec 11 11:39:15 crc kubenswrapper[5016]: I1211 11:39:15.300755 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-46n5n_95b9a24e-2b04-4161-aee4-2b7a73330a4e/kube-rbac-proxy/0.log" Dec 11 11:39:15 crc kubenswrapper[5016]: I1211 11:39:15.329686 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-46n5n_95b9a24e-2b04-4161-aee4-2b7a73330a4e/manager/0.log" Dec 11 11:39:15 crc kubenswrapper[5016]: I1211 11:39:15.393244 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-dfr98_26861a3b-3eb1-4c65-8c69-2d43a2aab77c/kube-rbac-proxy/0.log" Dec 11 11:39:15 crc kubenswrapper[5016]: I1211 11:39:15.747361 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-dfr98_26861a3b-3eb1-4c65-8c69-2d43a2aab77c/manager/0.log" Dec 11 11:39:15 crc kubenswrapper[5016]: I1211 11:39:15.819490 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-5qrxg_cdf76c07-0127-402e-90d7-9c868594b4d7/manager/0.log" Dec 11 11:39:15 crc kubenswrapper[5016]: I1211 11:39:15.838171 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-5qrxg_cdf76c07-0127-402e-90d7-9c868594b4d7/kube-rbac-proxy/0.log" Dec 11 11:39:16 crc kubenswrapper[5016]: I1211 11:39:16.084772 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-vbw9f_e4f0f2a5-a15b-45b8-96ea-91e37ea98237/kube-rbac-proxy/0.log" Dec 11 11:39:16 crc kubenswrapper[5016]: I1211 11:39:16.210327 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-vbw9f_e4f0f2a5-a15b-45b8-96ea-91e37ea98237/manager/0.log" Dec 11 11:39:16 crc kubenswrapper[5016]: I1211 11:39:16.344604 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-vcscv_4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19/manager/0.log" Dec 11 11:39:16 crc kubenswrapper[5016]: I1211 11:39:16.348531 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-vcscv_4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19/kube-rbac-proxy/0.log" Dec 11 11:39:16 crc kubenswrapper[5016]: I1211 11:39:16.446349 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fbtnxw_dfea8003-afd2-45aa-bd7b-dcc5460e8a80/kube-rbac-proxy/0.log" Dec 11 11:39:16 crc kubenswrapper[5016]: I1211 11:39:16.566032 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fbtnxw_dfea8003-afd2-45aa-bd7b-dcc5460e8a80/manager/0.log" Dec 11 11:39:16 crc kubenswrapper[5016]: I1211 11:39:16.991239 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-75cgw_3c73a135-7e40-4ba1-a674-0259ba8677db/registry-server/0.log" Dec 11 11:39:17 crc kubenswrapper[5016]: I1211 11:39:17.063770 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-b7dd9c5f4-ktl4m_2c43efae-bdbc-4043-b4fc-6e04c5f95003/operator/0.log" Dec 11 11:39:17 crc kubenswrapper[5016]: I1211 11:39:17.167178 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-wf9q5_f07a07a1-b235-4d36-a666-2b1be3363f34/kube-rbac-proxy/0.log" Dec 11 11:39:17 crc kubenswrapper[5016]: I1211 11:39:17.317405 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-wf9q5_f07a07a1-b235-4d36-a666-2b1be3363f34/manager/0.log" Dec 11 11:39:17 crc kubenswrapper[5016]: I1211 11:39:17.351598 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-kwflq_1099bcae-fea4-4864-8434-98ed888307e5/kube-rbac-proxy/0.log" Dec 11 11:39:17 crc kubenswrapper[5016]: I1211 11:39:17.479631 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-kwflq_1099bcae-fea4-4864-8434-98ed888307e5/manager/0.log" Dec 11 11:39:17 crc kubenswrapper[5016]: I1211 11:39:17.657787 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-rhkn6_e5d7cce6-369e-4837-8e40-385de0d684f7/operator/0.log" Dec 11 11:39:17 crc kubenswrapper[5016]: I1211 11:39:17.696628 5016 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-5z7c4_1d57e8d7-5c81-4a4d-97a9-af4795392e5a/kube-rbac-proxy/0.log" Dec 11 11:39:17 crc kubenswrapper[5016]: I1211 11:39:17.811388 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-595db99498-vmll2_5adcacd2-730a-4cb7-9944-239289405003/manager/0.log" Dec 11 11:39:17 crc kubenswrapper[5016]: I1211 11:39:17.852312 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-58d5ff84df-6jmdx_5e9876fa-8ec4-432b-b582-6ee210b828b5/kube-rbac-proxy/0.log" Dec 11 11:39:17 crc kubenswrapper[5016]: I1211 11:39:17.922509 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-5z7c4_1d57e8d7-5c81-4a4d-97a9-af4795392e5a/manager/0.log" Dec 11 11:39:17 crc kubenswrapper[5016]: I1211 11:39:17.959677 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-58d5ff84df-6jmdx_5e9876fa-8ec4-432b-b582-6ee210b828b5/manager/0.log" Dec 11 11:39:18 crc kubenswrapper[5016]: I1211 11:39:18.049152 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-wphqn_542f9a19-fab3-426b-bb8a-e12a45e4e422/kube-rbac-proxy/0.log" Dec 11 11:39:18 crc kubenswrapper[5016]: I1211 11:39:18.114884 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-wphqn_542f9a19-fab3-426b-bb8a-e12a45e4e422/manager/0.log" Dec 11 11:39:18 crc kubenswrapper[5016]: I1211 11:39:18.168993 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-75944c9b7-92pjv_82d121a5-e733-445f-be6e-bc96e3c162e2/kube-rbac-proxy/0.log" Dec 11 11:39:18 crc kubenswrapper[5016]: I1211 11:39:18.276730 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-75944c9b7-92pjv_82d121a5-e733-445f-be6e-bc96e3c162e2/manager/0.log" Dec 11 11:39:26 crc kubenswrapper[5016]: I1211 11:39:26.474870 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03" Dec 11 11:39:26 crc kubenswrapper[5016]: E1211 11:39:26.475823 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:39:38 crc kubenswrapper[5016]: I1211 11:39:38.474933 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03" Dec 11 11:39:38 crc kubenswrapper[5016]: E1211 11:39:38.476164 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 
11:39:39 crc kubenswrapper[5016]: I1211 11:39:39.839371 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-ljcrh_af75da0d-e4cb-4961-b57a-ea888c20af89/control-plane-machine-set-operator/2.log" Dec 11 11:39:39 crc kubenswrapper[5016]: I1211 11:39:39.917847 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-ljcrh_af75da0d-e4cb-4961-b57a-ea888c20af89/control-plane-machine-set-operator/1.log" Dec 11 11:39:40 crc kubenswrapper[5016]: I1211 11:39:40.112618 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-tn6f4_cb94a68f-794d-4e0f-9a65-aff1b885d021/machine-api-operator/0.log" Dec 11 11:39:40 crc kubenswrapper[5016]: I1211 11:39:40.112672 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-tn6f4_cb94a68f-794d-4e0f-9a65-aff1b885d021/kube-rbac-proxy/0.log" Dec 11 11:39:49 crc kubenswrapper[5016]: I1211 11:39:49.475260 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03" Dec 11 11:39:49 crc kubenswrapper[5016]: E1211 11:39:49.476091 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:39:52 crc kubenswrapper[5016]: I1211 11:39:52.096302 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-sm8ss_f63479f5-0af6-4622-85fb-42bcfb115692/cert-manager-controller/0.log" Dec 11 11:39:52 crc kubenswrapper[5016]: I1211 11:39:52.269690 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-8pgz5_e7d758da-34fb-4507-83f4-1e5f948d9249/cert-manager-webhook/0.log" Dec 11 11:39:52 crc kubenswrapper[5016]: I1211 11:39:52.273583 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-dlzf6_058f486a-6a97-4bc7-9e43-65af0e4b5634/cert-manager-cainjector/0.log" Dec 11 11:40:03 crc kubenswrapper[5016]: I1211 11:40:03.484612 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03" Dec 11 11:40:03 crc kubenswrapper[5016]: E1211 11:40:03.485671 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:40:05 crc kubenswrapper[5016]: I1211 11:40:05.820163 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6ff7998486-6wx52_35c9ce4d-504d-4813-b776-f5d07b9c3d1d/nmstate-console-plugin/0.log" Dec 11 11:40:06 crc kubenswrapper[5016]: I1211 11:40:06.003049 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-handler-276mn_f541e158-4765-46f6-9a14-f6917fa4b1e3/nmstate-handler/0.log" Dec 11 11:40:06 crc kubenswrapper[5016]: I1211 11:40:06.021174 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-h52ww_32c0573d-b135-42ba-bec4-9092104e870c/kube-rbac-proxy/0.log" Dec 11 11:40:06 crc kubenswrapper[5016]: I1211 11:40:06.052813 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-h52ww_32c0573d-b135-42ba-bec4-9092104e870c/nmstate-metrics/0.log" Dec 11 11:40:06 crc kubenswrapper[5016]: I1211 11:40:06.190565 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-6769fb99d-bcg49_040df3df-7870-45d8-b15c-4f083db8385f/nmstate-operator/0.log" Dec 11 11:40:06 crc kubenswrapper[5016]: I1211 11:40:06.247810 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-f8fb84555-lz22l_671f4389-4dd6-45c7-8eda-d60191819517/nmstate-webhook/0.log" Dec 11 11:40:17 crc kubenswrapper[5016]: I1211 11:40:17.475602 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03" Dec 11 11:40:17 crc kubenswrapper[5016]: E1211 11:40:17.476775 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:40:21 crc kubenswrapper[5016]: I1211 11:40:21.583340 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-skxpb_c4c659bc-4572-4852-8008-231dc642bbd7/kube-rbac-proxy/0.log" Dec 11 11:40:21 crc kubenswrapper[5016]: I1211 11:40:21.731404 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-skxpb_c4c659bc-4572-4852-8008-231dc642bbd7/controller/0.log" Dec 11 11:40:21 crc kubenswrapper[5016]: I1211 11:40:21.791925 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-frr-files/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.023641 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-metrics/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.051724 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-reloader/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.053404 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-frr-files/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.111416 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-reloader/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.232557 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-metrics/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.242236 5016 log.go:25] 
"Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-frr-files/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.286128 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-reloader/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.326875 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-metrics/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.500569 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-frr-files/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.533532 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-reloader/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.551265 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-metrics/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.587405 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/controller/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.747881 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/frr-metrics/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.818742 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/kube-rbac-proxy-frr/0.log" Dec 11 11:40:22 crc kubenswrapper[5016]: I1211 11:40:22.827808 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/kube-rbac-proxy/0.log" Dec 11 11:40:23 crc kubenswrapper[5016]: I1211 11:40:23.045856 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/reloader/0.log" Dec 11 11:40:23 crc kubenswrapper[5016]: I1211 11:40:23.150718 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7784b6fcf-4c5v8_76b7a036-07e1-4a49-b5c4-39ed67ae34b6/frr-k8s-webhook-server/0.log" Dec 11 11:40:23 crc kubenswrapper[5016]: I1211 11:40:23.367433 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6fcfbcfbcf-dfwbt_bdd623de-6c7c-46b2-a168-fabbbf16ce6c/manager/0.log" Dec 11 11:40:23 crc kubenswrapper[5016]: I1211 11:40:23.507148 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7c896697b-mstrs_8b488ea8-aba6-430a-bb39-d1459ef2edea/webhook-server/0.log" Dec 11 11:40:23 crc kubenswrapper[5016]: I1211 11:40:23.668440 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-tdtwb_c3691778-17ce-4c44-b8e1-f9f5a6727778/kube-rbac-proxy/0.log" Dec 11 11:40:24 crc kubenswrapper[5016]: I1211 11:40:24.192485 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/frr/0.log" Dec 11 11:40:24 crc kubenswrapper[5016]: I1211 11:40:24.235281 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_speaker-tdtwb_c3691778-17ce-4c44-b8e1-f9f5a6727778/speaker/0.log" Dec 11 11:40:32 crc kubenswrapper[5016]: I1211 11:40:32.474783 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03" Dec 11 11:40:32 crc kubenswrapper[5016]: E1211 11:40:32.475873 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:40:37 crc kubenswrapper[5016]: I1211 11:40:37.631596 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/util/0.log" Dec 11 11:40:37 crc kubenswrapper[5016]: I1211 11:40:37.826545 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/util/0.log" Dec 11 11:40:37 crc kubenswrapper[5016]: I1211 11:40:37.883964 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/pull/0.log" Dec 11 11:40:37 crc kubenswrapper[5016]: I1211 11:40:37.946818 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/pull/0.log" Dec 11 11:40:38 crc kubenswrapper[5016]: I1211 11:40:38.071206 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/pull/0.log" Dec 11 11:40:38 crc kubenswrapper[5016]: I1211 11:40:38.098089 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/util/0.log" Dec 11 11:40:38 crc kubenswrapper[5016]: I1211 11:40:38.103905 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/extract/0.log" Dec 11 11:40:38 crc kubenswrapper[5016]: I1211 11:40:38.316389 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/util/0.log" Dec 11 11:40:38 crc kubenswrapper[5016]: I1211 11:40:38.513881 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/util/0.log" Dec 11 11:40:38 crc kubenswrapper[5016]: I1211 11:40:38.561342 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/pull/0.log" Dec 11 11:40:38 crc kubenswrapper[5016]: I1211 11:40:38.561389 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/pull/0.log" Dec 11 11:40:38 crc kubenswrapper[5016]: I1211 11:40:38.751669 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/extract/0.log" Dec 11 11:40:38 crc kubenswrapper[5016]: I1211 11:40:38.788359 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/util/0.log" Dec 11 11:40:38 crc kubenswrapper[5016]: I1211 11:40:38.817397 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/pull/0.log" Dec 11 11:40:38 crc kubenswrapper[5016]: I1211 11:40:38.959578 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/extract-utilities/0.log" Dec 11 11:40:39 crc kubenswrapper[5016]: I1211 11:40:39.184891 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/extract-utilities/0.log" Dec 11 11:40:39 crc kubenswrapper[5016]: I1211 11:40:39.191282 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/extract-content/0.log" Dec 11 11:40:39 crc kubenswrapper[5016]: I1211 11:40:39.244823 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/extract-content/0.log" Dec 11 11:40:39 crc kubenswrapper[5016]: I1211 11:40:39.379115 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/extract-content/0.log" Dec 11 11:40:39 crc kubenswrapper[5016]: I1211 11:40:39.403739 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/extract-utilities/0.log" Dec 11 11:40:39 crc kubenswrapper[5016]: I1211 11:40:39.581059 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/registry-server/0.log" Dec 11 11:40:39 crc kubenswrapper[5016]: I1211 11:40:39.637173 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/extract-utilities/0.log" Dec 11 11:40:39 crc kubenswrapper[5016]: I1211 11:40:39.974972 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/extract-utilities/0.log" Dec 11 11:40:39 crc kubenswrapper[5016]: I1211 11:40:39.982896 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/extract-content/0.log" Dec 11 11:40:40 crc kubenswrapper[5016]: I1211 11:40:40.005067 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/extract-content/0.log" Dec 11 11:40:40 
crc kubenswrapper[5016]: I1211 11:40:40.176815 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/extract-content/0.log"
Dec 11 11:40:40 crc kubenswrapper[5016]: I1211 11:40:40.214714 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/extract-utilities/0.log"
Dec 11 11:40:40 crc kubenswrapper[5016]: I1211 11:40:40.418102 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-fqzqf_60a5c1c0-450b-4360-b6f5-7380a0a2db4f/marketplace-operator/0.log"
Dec 11 11:40:40 crc kubenswrapper[5016]: I1211 11:40:40.566313 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/extract-utilities/0.log"
Dec 11 11:40:40 crc kubenswrapper[5016]: I1211 11:40:40.737315 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/registry-server/0.log"
Dec 11 11:40:40 crc kubenswrapper[5016]: I1211 11:40:40.833226 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/extract-utilities/0.log"
Dec 11 11:40:40 crc kubenswrapper[5016]: I1211 11:40:40.855553 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/extract-content/0.log"
Dec 11 11:40:40 crc kubenswrapper[5016]: I1211 11:40:40.893198 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/extract-content/0.log"
Dec 11 11:40:41 crc kubenswrapper[5016]: I1211 11:40:41.139694 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/extract-utilities/0.log"
Dec 11 11:40:41 crc kubenswrapper[5016]: I1211 11:40:41.150688 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/extract-content/0.log"
Dec 11 11:40:41 crc kubenswrapper[5016]: I1211 11:40:41.280043 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/registry-server/0.log"
Dec 11 11:40:41 crc kubenswrapper[5016]: I1211 11:40:41.367077 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/extract-utilities/0.log"
Dec 11 11:40:41 crc kubenswrapper[5016]: I1211 11:40:41.544239 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/extract-utilities/0.log"
Dec 11 11:40:41 crc kubenswrapper[5016]: I1211 11:40:41.566849 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/extract-content/0.log"
Dec 11 11:40:41 crc kubenswrapper[5016]: I1211 11:40:41.574564 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/extract-content/0.log"
Dec 11 11:40:41 crc kubenswrapper[5016]: I1211 11:40:41.739995 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/extract-utilities/0.log"
Dec 11 11:40:41 crc kubenswrapper[5016]: I1211 11:40:41.783070 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/extract-content/0.log"
Dec 11 11:40:42 crc kubenswrapper[5016]: I1211 11:40:42.323711 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/registry-server/0.log"
Dec 11 11:40:46 crc kubenswrapper[5016]: I1211 11:40:46.475029 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:40:46 crc kubenswrapper[5016]: E1211 11:40:46.476044 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:40:57 crc kubenswrapper[5016]: I1211 11:40:57.474344 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:40:57 crc kubenswrapper[5016]: E1211 11:40:57.475319 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:41:10 crc kubenswrapper[5016]: I1211 11:41:10.475554 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:41:10 crc kubenswrapper[5016]: E1211 11:41:10.477473 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:41:23 crc kubenswrapper[5016]: I1211 11:41:23.483573 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:41:23 crc kubenswrapper[5016]: E1211 11:41:23.484602 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:41:35 crc kubenswrapper[5016]: I1211 11:41:35.477207 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:41:35 crc kubenswrapper[5016]: E1211 11:41:35.479629 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:41:46 crc kubenswrapper[5016]: I1211 11:41:46.474822 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:41:46 crc kubenswrapper[5016]: E1211 11:41:46.475800 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:42:01 crc kubenswrapper[5016]: I1211 11:42:01.474790 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:42:01 crc kubenswrapper[5016]: E1211 11:42:01.477198 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:42:13 crc kubenswrapper[5016]: I1211 11:42:13.484681 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:42:13 crc kubenswrapper[5016]: E1211 11:42:13.485729 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.206143 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-t7969"]
Dec 11 11:42:20 crc kubenswrapper[5016]: E1211 11:42:20.207391 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" containerName="registry-server"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.207411 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" containerName="registry-server"
Dec 11 11:42:20 crc kubenswrapper[5016]: E1211 11:42:20.207433 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" containerName="extract-content"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.207442 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" containerName="extract-content"
Dec 11 11:42:20 crc kubenswrapper[5016]: E1211 11:42:20.207456 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" containerName="extract-utilities"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.207466 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" containerName="extract-utilities"
Dec 11 11:42:20 crc kubenswrapper[5016]: E1211 11:42:20.207488 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25fd3891-ef7f-4128-8f17-4e3ac41495a6" containerName="container-00"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.207495 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="25fd3891-ef7f-4128-8f17-4e3ac41495a6" containerName="container-00"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.207745 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="25fd3891-ef7f-4128-8f17-4e3ac41495a6" containerName="container-00"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.207773 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d80b14a-2be2-41bd-bc94-c1027fbeb5d8" containerName="registry-server"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.209412 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.225145 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-t7969"]
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.275447 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72205571-69dc-48fa-b912-3f7e6af72013-catalog-content\") pod \"redhat-marketplace-t7969\" (UID: \"72205571-69dc-48fa-b912-3f7e6af72013\") " pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.275487 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5frt\" (UniqueName: \"kubernetes.io/projected/72205571-69dc-48fa-b912-3f7e6af72013-kube-api-access-f5frt\") pod \"redhat-marketplace-t7969\" (UID: \"72205571-69dc-48fa-b912-3f7e6af72013\") " pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.275522 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72205571-69dc-48fa-b912-3f7e6af72013-utilities\") pod \"redhat-marketplace-t7969\" (UID: \"72205571-69dc-48fa-b912-3f7e6af72013\") " pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.378007 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72205571-69dc-48fa-b912-3f7e6af72013-catalog-content\") pod \"redhat-marketplace-t7969\" (UID: \"72205571-69dc-48fa-b912-3f7e6af72013\") " pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.378523 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5frt\" (UniqueName: \"kubernetes.io/projected/72205571-69dc-48fa-b912-3f7e6af72013-kube-api-access-f5frt\") pod \"redhat-marketplace-t7969\" (UID: \"72205571-69dc-48fa-b912-3f7e6af72013\") " pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.378572 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72205571-69dc-48fa-b912-3f7e6af72013-utilities\") pod \"redhat-marketplace-t7969\" (UID: \"72205571-69dc-48fa-b912-3f7e6af72013\") " pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.378647 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72205571-69dc-48fa-b912-3f7e6af72013-catalog-content\") pod \"redhat-marketplace-t7969\" (UID: \"72205571-69dc-48fa-b912-3f7e6af72013\") " pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.379102 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72205571-69dc-48fa-b912-3f7e6af72013-utilities\") pod \"redhat-marketplace-t7969\" (UID: \"72205571-69dc-48fa-b912-3f7e6af72013\") " pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.406453 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5frt\" (UniqueName: \"kubernetes.io/projected/72205571-69dc-48fa-b912-3f7e6af72013-kube-api-access-f5frt\") pod \"redhat-marketplace-t7969\" (UID: \"72205571-69dc-48fa-b912-3f7e6af72013\") " pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:20 crc kubenswrapper[5016]: I1211 11:42:20.575546 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:21 crc kubenswrapper[5016]: I1211 11:42:21.096334 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-t7969"]
Dec 11 11:42:21 crc kubenswrapper[5016]: I1211 11:42:21.195015 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t7969" event={"ID":"72205571-69dc-48fa-b912-3f7e6af72013","Type":"ContainerStarted","Data":"3740e5a5d6dc3e1f39ccae33b8831b01f2dd560763edb2893069e503eb9b5762"}
Dec 11 11:42:22 crc kubenswrapper[5016]: I1211 11:42:22.210868 5016 generic.go:334] "Generic (PLEG): container finished" podID="72205571-69dc-48fa-b912-3f7e6af72013" containerID="53c080309e444f23a3ece463326e8f0f20efbac0e452e26536fa0fc3c3489650" exitCode=0
Dec 11 11:42:22 crc kubenswrapper[5016]: I1211 11:42:22.211032 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t7969" event={"ID":"72205571-69dc-48fa-b912-3f7e6af72013","Type":"ContainerDied","Data":"53c080309e444f23a3ece463326e8f0f20efbac0e452e26536fa0fc3c3489650"}
Dec 11 11:42:22 crc kubenswrapper[5016]: I1211 11:42:22.217814 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 11 11:42:24 crc kubenswrapper[5016]: I1211 11:42:24.475853 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:42:24 crc kubenswrapper[5016]: E1211 11:42:24.477184 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:42:25 crc kubenswrapper[5016]: I1211 11:42:25.252231 5016 generic.go:334] "Generic (PLEG): container finished" podID="72205571-69dc-48fa-b912-3f7e6af72013" containerID="d83b3a205f04f09ece5236dc8e0806848b5fd88ee1aaf0a94f4e0e24aae6d04d" exitCode=0
Dec 11 11:42:25 crc kubenswrapper[5016]: I1211 11:42:25.252661 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t7969" event={"ID":"72205571-69dc-48fa-b912-3f7e6af72013","Type":"ContainerDied","Data":"d83b3a205f04f09ece5236dc8e0806848b5fd88ee1aaf0a94f4e0e24aae6d04d"}
Dec 11 11:42:27 crc kubenswrapper[5016]: I1211 11:42:27.273520 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t7969" event={"ID":"72205571-69dc-48fa-b912-3f7e6af72013","Type":"ContainerStarted","Data":"36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb"}
Dec 11 11:42:27 crc kubenswrapper[5016]: I1211 11:42:27.303767 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-t7969" podStartSLOduration=3.277336594 podStartE2EDuration="7.303747924s" podCreationTimestamp="2025-12-11 11:42:20 +0000 UTC" firstStartedPulling="2025-12-11 11:42:22.217452421 +0000 UTC m=+4059.036012010" lastFinishedPulling="2025-12-11 11:42:26.243863761 +0000 UTC m=+4063.062423340" observedRunningTime="2025-12-11 11:42:27.300788442 +0000 UTC m=+4064.119348031" watchObservedRunningTime="2025-12-11 11:42:27.303747924 +0000 UTC m=+4064.122307503"
Dec 11 11:42:30 crc kubenswrapper[5016]: I1211 11:42:30.576658 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:30 crc kubenswrapper[5016]: I1211 11:42:30.577553 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:30 crc kubenswrapper[5016]: I1211 11:42:30.644507 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:31 crc kubenswrapper[5016]: I1211 11:42:31.385267 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:31 crc kubenswrapper[5016]: I1211 11:42:31.448090 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-t7969"]
Dec 11 11:42:33 crc kubenswrapper[5016]: I1211 11:42:33.344096 5016 generic.go:334] "Generic (PLEG): container finished" podID="080905d3-b916-4151-b81b-b7b30fe7c291" containerID="f49ce6bf40755ad3151fe6c669bb24699599a2dea94483849668ecc99388663c" exitCode=0
Dec 11 11:42:33 crc kubenswrapper[5016]: I1211 11:42:33.344200 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pq25m/must-gather-sfprq" event={"ID":"080905d3-b916-4151-b81b-b7b30fe7c291","Type":"ContainerDied","Data":"f49ce6bf40755ad3151fe6c669bb24699599a2dea94483849668ecc99388663c"}
Dec 11 11:42:33 crc kubenswrapper[5016]: I1211 11:42:33.344920 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-t7969" podUID="72205571-69dc-48fa-b912-3f7e6af72013" containerName="registry-server" containerID="cri-o://36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb" gracePeriod=2
Dec 11 11:42:33 crc kubenswrapper[5016]: I1211 11:42:33.346144 5016 scope.go:117] "RemoveContainer" containerID="f49ce6bf40755ad3151fe6c669bb24699599a2dea94483849668ecc99388663c"
Dec 11 11:42:33 crc kubenswrapper[5016]: I1211 11:42:33.859191 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:33 crc kubenswrapper[5016]: I1211 11:42:33.986634 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5frt\" (UniqueName: \"kubernetes.io/projected/72205571-69dc-48fa-b912-3f7e6af72013-kube-api-access-f5frt\") pod \"72205571-69dc-48fa-b912-3f7e6af72013\" (UID: \"72205571-69dc-48fa-b912-3f7e6af72013\") "
Dec 11 11:42:33 crc kubenswrapper[5016]: I1211 11:42:33.986823 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72205571-69dc-48fa-b912-3f7e6af72013-catalog-content\") pod \"72205571-69dc-48fa-b912-3f7e6af72013\" (UID: \"72205571-69dc-48fa-b912-3f7e6af72013\") "
Dec 11 11:42:33 crc kubenswrapper[5016]: I1211 11:42:33.986974 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72205571-69dc-48fa-b912-3f7e6af72013-utilities\") pod \"72205571-69dc-48fa-b912-3f7e6af72013\" (UID: \"72205571-69dc-48fa-b912-3f7e6af72013\") "
Dec 11 11:42:33 crc kubenswrapper[5016]: I1211 11:42:33.988709 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72205571-69dc-48fa-b912-3f7e6af72013-utilities" (OuterVolumeSpecName: "utilities") pod "72205571-69dc-48fa-b912-3f7e6af72013" (UID: "72205571-69dc-48fa-b912-3f7e6af72013"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 11:42:33 crc kubenswrapper[5016]: I1211 11:42:33.996194 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72205571-69dc-48fa-b912-3f7e6af72013-kube-api-access-f5frt" (OuterVolumeSpecName: "kube-api-access-f5frt") pod "72205571-69dc-48fa-b912-3f7e6af72013" (UID: "72205571-69dc-48fa-b912-3f7e6af72013"). InnerVolumeSpecName "kube-api-access-f5frt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.012084 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72205571-69dc-48fa-b912-3f7e6af72013-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "72205571-69dc-48fa-b912-3f7e6af72013" (UID: "72205571-69dc-48fa-b912-3f7e6af72013"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.062683 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pq25m_must-gather-sfprq_080905d3-b916-4151-b81b-b7b30fe7c291/gather/0.log"
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.090151 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72205571-69dc-48fa-b912-3f7e6af72013-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.090191 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5frt\" (UniqueName: \"kubernetes.io/projected/72205571-69dc-48fa-b912-3f7e6af72013-kube-api-access-f5frt\") on node \"crc\" DevicePath \"\""
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.090203 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72205571-69dc-48fa-b912-3f7e6af72013-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.366833 5016 generic.go:334] "Generic (PLEG): container finished" podID="72205571-69dc-48fa-b912-3f7e6af72013" containerID="36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb" exitCode=0
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.366975 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t7969" event={"ID":"72205571-69dc-48fa-b912-3f7e6af72013","Type":"ContainerDied","Data":"36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb"}
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.367037 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t7969" event={"ID":"72205571-69dc-48fa-b912-3f7e6af72013","Type":"ContainerDied","Data":"3740e5a5d6dc3e1f39ccae33b8831b01f2dd560763edb2893069e503eb9b5762"}
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.367068 5016 scope.go:117] "RemoveContainer" containerID="36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb"
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.367858 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t7969"
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.406807 5016 scope.go:117] "RemoveContainer" containerID="d83b3a205f04f09ece5236dc8e0806848b5fd88ee1aaf0a94f4e0e24aae6d04d"
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.419381 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-t7969"]
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.432557 5016 scope.go:117] "RemoveContainer" containerID="53c080309e444f23a3ece463326e8f0f20efbac0e452e26536fa0fc3c3489650"
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.433899 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-t7969"]
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.487153 5016 scope.go:117] "RemoveContainer" containerID="36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb"
Dec 11 11:42:34 crc kubenswrapper[5016]: E1211 11:42:34.487717 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb\": container with ID starting with 36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb not found: ID does not exist" containerID="36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb"
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.487753 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb"} err="failed to get container status \"36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb\": rpc error: code = NotFound desc = could not find container \"36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb\": container with ID starting with 36663a2248d479561fa8489ce7a54ab63275e49dfa0608819f9faeb875d401fb not found: ID does not exist"
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.487783 5016 scope.go:117] "RemoveContainer" containerID="d83b3a205f04f09ece5236dc8e0806848b5fd88ee1aaf0a94f4e0e24aae6d04d"
Dec 11 11:42:34 crc kubenswrapper[5016]: E1211 11:42:34.488607 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d83b3a205f04f09ece5236dc8e0806848b5fd88ee1aaf0a94f4e0e24aae6d04d\": container with ID starting with d83b3a205f04f09ece5236dc8e0806848b5fd88ee1aaf0a94f4e0e24aae6d04d not found: ID does not exist" containerID="d83b3a205f04f09ece5236dc8e0806848b5fd88ee1aaf0a94f4e0e24aae6d04d"
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.488755 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d83b3a205f04f09ece5236dc8e0806848b5fd88ee1aaf0a94f4e0e24aae6d04d"} err="failed to get container status \"d83b3a205f04f09ece5236dc8e0806848b5fd88ee1aaf0a94f4e0e24aae6d04d\": rpc error: code = NotFound desc = could not find container \"d83b3a205f04f09ece5236dc8e0806848b5fd88ee1aaf0a94f4e0e24aae6d04d\": container with ID starting with d83b3a205f04f09ece5236dc8e0806848b5fd88ee1aaf0a94f4e0e24aae6d04d not found: ID does not exist"
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.488858 5016 scope.go:117] "RemoveContainer" containerID="53c080309e444f23a3ece463326e8f0f20efbac0e452e26536fa0fc3c3489650"
Dec 11 11:42:34 crc kubenswrapper[5016]: E1211 11:42:34.489428 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53c080309e444f23a3ece463326e8f0f20efbac0e452e26536fa0fc3c3489650\": container with ID starting with 53c080309e444f23a3ece463326e8f0f20efbac0e452e26536fa0fc3c3489650 not found: ID does not exist" containerID="53c080309e444f23a3ece463326e8f0f20efbac0e452e26536fa0fc3c3489650"
Dec 11 11:42:34 crc kubenswrapper[5016]: I1211 11:42:34.489492 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53c080309e444f23a3ece463326e8f0f20efbac0e452e26536fa0fc3c3489650"} err="failed to get container status \"53c080309e444f23a3ece463326e8f0f20efbac0e452e26536fa0fc3c3489650\": rpc error: code = NotFound desc = could not find container \"53c080309e444f23a3ece463326e8f0f20efbac0e452e26536fa0fc3c3489650\": container with ID starting with 53c080309e444f23a3ece463326e8f0f20efbac0e452e26536fa0fc3c3489650 not found: ID does not exist"
Dec 11 11:42:35 crc kubenswrapper[5016]: I1211 11:42:35.491883 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72205571-69dc-48fa-b912-3f7e6af72013" path="/var/lib/kubelet/pods/72205571-69dc-48fa-b912-3f7e6af72013/volumes"
Dec 11 11:42:36 crc kubenswrapper[5016]: I1211 11:42:36.475745 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:42:36 crc kubenswrapper[5016]: E1211 11:42:36.476209 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:42:42 crc kubenswrapper[5016]: I1211 11:42:42.571086 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pq25m/must-gather-sfprq"]
Dec 11 11:42:42 crc kubenswrapper[5016]: I1211 11:42:42.572320 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-pq25m/must-gather-sfprq" podUID="080905d3-b916-4151-b81b-b7b30fe7c291" containerName="copy" containerID="cri-o://8dae247535b83517bf7568847b201f9618870283ccba7ca98bce54b3770e13f7" gracePeriod=2
Dec 11 11:42:42 crc kubenswrapper[5016]: I1211 11:42:42.581857 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pq25m/must-gather-sfprq"]
Dec 11 11:42:43 crc kubenswrapper[5016]: I1211 11:42:43.508758 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pq25m_must-gather-sfprq_080905d3-b916-4151-b81b-b7b30fe7c291/copy/0.log"
Dec 11 11:42:43 crc kubenswrapper[5016]: I1211 11:42:43.510298 5016 generic.go:334] "Generic (PLEG): container finished" podID="080905d3-b916-4151-b81b-b7b30fe7c291" containerID="8dae247535b83517bf7568847b201f9618870283ccba7ca98bce54b3770e13f7" exitCode=143
Dec 11 11:42:43 crc kubenswrapper[5016]: I1211 11:42:43.510353 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e69f8be6b25bf16046372ee74a37e8c2bd34b01cf2363c7ea000ec4b6cbf77b7"
Dec 11 11:42:43 crc kubenswrapper[5016]: I1211 11:42:43.534139 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pq25m_must-gather-sfprq_080905d3-b916-4151-b81b-b7b30fe7c291/copy/0.log"
Dec 11 11:42:43 crc kubenswrapper[5016]: I1211 11:42:43.534638 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pq25m/must-gather-sfprq"
Dec 11 11:42:43 crc kubenswrapper[5016]: I1211 11:42:43.627693 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nn8jl\" (UniqueName: \"kubernetes.io/projected/080905d3-b916-4151-b81b-b7b30fe7c291-kube-api-access-nn8jl\") pod \"080905d3-b916-4151-b81b-b7b30fe7c291\" (UID: \"080905d3-b916-4151-b81b-b7b30fe7c291\") "
Dec 11 11:42:43 crc kubenswrapper[5016]: I1211 11:42:43.630920 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/080905d3-b916-4151-b81b-b7b30fe7c291-must-gather-output\") pod \"080905d3-b916-4151-b81b-b7b30fe7c291\" (UID: \"080905d3-b916-4151-b81b-b7b30fe7c291\") "
Dec 11 11:42:43 crc kubenswrapper[5016]: I1211 11:42:43.647786 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/080905d3-b916-4151-b81b-b7b30fe7c291-kube-api-access-nn8jl" (OuterVolumeSpecName: "kube-api-access-nn8jl") pod "080905d3-b916-4151-b81b-b7b30fe7c291" (UID: "080905d3-b916-4151-b81b-b7b30fe7c291"). InnerVolumeSpecName "kube-api-access-nn8jl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:42:43 crc kubenswrapper[5016]: I1211 11:42:43.734910 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nn8jl\" (UniqueName: \"kubernetes.io/projected/080905d3-b916-4151-b81b-b7b30fe7c291-kube-api-access-nn8jl\") on node \"crc\" DevicePath \"\""
Dec 11 11:42:43 crc kubenswrapper[5016]: I1211 11:42:43.820002 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/080905d3-b916-4151-b81b-b7b30fe7c291-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "080905d3-b916-4151-b81b-b7b30fe7c291" (UID: "080905d3-b916-4151-b81b-b7b30fe7c291"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 11:42:43 crc kubenswrapper[5016]: I1211 11:42:43.837290 5016 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/080905d3-b916-4151-b81b-b7b30fe7c291-must-gather-output\") on node \"crc\" DevicePath \"\""
Dec 11 11:42:44 crc kubenswrapper[5016]: I1211 11:42:44.522909 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pq25m/must-gather-sfprq"
Dec 11 11:42:45 crc kubenswrapper[5016]: I1211 11:42:45.487760 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="080905d3-b916-4151-b81b-b7b30fe7c291" path="/var/lib/kubelet/pods/080905d3-b916-4151-b81b-b7b30fe7c291/volumes"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.390126 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bwndd"]
Dec 11 11:42:47 crc kubenswrapper[5016]: E1211 11:42:47.391095 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72205571-69dc-48fa-b912-3f7e6af72013" containerName="extract-content"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.391114 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="72205571-69dc-48fa-b912-3f7e6af72013" containerName="extract-content"
Dec 11 11:42:47 crc kubenswrapper[5016]: E1211 11:42:47.391133 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="080905d3-b916-4151-b81b-b7b30fe7c291" containerName="copy"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.391140 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="080905d3-b916-4151-b81b-b7b30fe7c291" containerName="copy"
Dec 11 11:42:47 crc kubenswrapper[5016]: E1211 11:42:47.391159 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="080905d3-b916-4151-b81b-b7b30fe7c291" containerName="gather"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.391167 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="080905d3-b916-4151-b81b-b7b30fe7c291" containerName="gather"
Dec 11 11:42:47 crc kubenswrapper[5016]: E1211 11:42:47.391187 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72205571-69dc-48fa-b912-3f7e6af72013" containerName="extract-utilities"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.391196 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="72205571-69dc-48fa-b912-3f7e6af72013" containerName="extract-utilities"
Dec 11 11:42:47 crc kubenswrapper[5016]: E1211 11:42:47.391219 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72205571-69dc-48fa-b912-3f7e6af72013" containerName="registry-server"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.391227 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="72205571-69dc-48fa-b912-3f7e6af72013" containerName="registry-server"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.391446 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="080905d3-b916-4151-b81b-b7b30fe7c291" containerName="gather"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.391483 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="72205571-69dc-48fa-b912-3f7e6af72013" containerName="registry-server"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.391526 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="080905d3-b916-4151-b81b-b7b30fe7c291" containerName="copy"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.393103 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.419022 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bwndd"]
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.526161 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2571e26c-1c9e-49c8-9ce5-d949bb227f41-utilities\") pod \"community-operators-bwndd\" (UID: \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\") " pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.526754 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlj8k\" (UniqueName: \"kubernetes.io/projected/2571e26c-1c9e-49c8-9ce5-d949bb227f41-kube-api-access-hlj8k\") pod \"community-operators-bwndd\" (UID: \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\") " pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.526782 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2571e26c-1c9e-49c8-9ce5-d949bb227f41-catalog-content\") pod \"community-operators-bwndd\" (UID: \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\") " pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.628666 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlj8k\" (UniqueName: \"kubernetes.io/projected/2571e26c-1c9e-49c8-9ce5-d949bb227f41-kube-api-access-hlj8k\") pod \"community-operators-bwndd\" (UID: \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\") " pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.629009 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2571e26c-1c9e-49c8-9ce5-d949bb227f41-catalog-content\") pod \"community-operators-bwndd\" (UID: \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\") " pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.629312 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2571e26c-1c9e-49c8-9ce5-d949bb227f41-utilities\") pod \"community-operators-bwndd\" (UID: \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\") " pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.629895 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2571e26c-1c9e-49c8-9ce5-d949bb227f41-utilities\") pod \"community-operators-bwndd\" (UID: \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\") " pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.629997 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2571e26c-1c9e-49c8-9ce5-d949bb227f41-catalog-content\") pod \"community-operators-bwndd\" (UID: \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\") " pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.656576 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlj8k\" (UniqueName: \"kubernetes.io/projected/2571e26c-1c9e-49c8-9ce5-d949bb227f41-kube-api-access-hlj8k\") pod \"community-operators-bwndd\" (UID: \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\") " pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:47 crc kubenswrapper[5016]: I1211 11:42:47.717297 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:48 crc kubenswrapper[5016]: I1211 11:42:48.088326 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bwndd"]
Dec 11 11:42:48 crc kubenswrapper[5016]: I1211 11:42:48.569592 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwndd" event={"ID":"2571e26c-1c9e-49c8-9ce5-d949bb227f41","Type":"ContainerStarted","Data":"04f2a825df9ea4ce7047f8fe513030d572d2cf0487ec7f24f5168a12e439e53a"}
Dec 11 11:42:49 crc kubenswrapper[5016]: I1211 11:42:49.610369 5016 generic.go:334] "Generic (PLEG): container finished" podID="2571e26c-1c9e-49c8-9ce5-d949bb227f41" containerID="3a0c10b7210ef99e68af9f5f7a47ba3cdf41f0c383ef97a0dbf1c082c487e346" exitCode=0
Dec 11 11:42:49 crc kubenswrapper[5016]: I1211 11:42:49.610750 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwndd" event={"ID":"2571e26c-1c9e-49c8-9ce5-d949bb227f41","Type":"ContainerDied","Data":"3a0c10b7210ef99e68af9f5f7a47ba3cdf41f0c383ef97a0dbf1c082c487e346"}
Dec 11 11:42:50 crc kubenswrapper[5016]: I1211 11:42:50.475528 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:42:50 crc kubenswrapper[5016]: E1211 11:42:50.476269 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:42:50 crc kubenswrapper[5016]: I1211 11:42:50.626599 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwndd" event={"ID":"2571e26c-1c9e-49c8-9ce5-d949bb227f41","Type":"ContainerStarted","Data":"cf41595404fe09409651abebe4b165b31e852fc6869c358f20013718ce547084"}
Dec 11 11:42:51 crc kubenswrapper[5016]: I1211 11:42:51.638481 5016 generic.go:334] "Generic (PLEG): container finished" podID="2571e26c-1c9e-49c8-9ce5-d949bb227f41" containerID="cf41595404fe09409651abebe4b165b31e852fc6869c358f20013718ce547084" exitCode=0
Dec 11 11:42:51 crc kubenswrapper[5016]: I1211 11:42:51.638592 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwndd" event={"ID":"2571e26c-1c9e-49c8-9ce5-d949bb227f41","Type":"ContainerDied","Data":"cf41595404fe09409651abebe4b165b31e852fc6869c358f20013718ce547084"}
Dec 11 11:42:53 crc kubenswrapper[5016]: I1211 11:42:53.688552 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwndd" event={"ID":"2571e26c-1c9e-49c8-9ce5-d949bb227f41","Type":"ContainerStarted","Data":"633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043"}
Dec 11 11:42:53 crc kubenswrapper[5016]: I1211 11:42:53.722363 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bwndd" podStartSLOduration=3.769714656 podStartE2EDuration="6.722335951s" podCreationTimestamp="2025-12-11 11:42:47 +0000 UTC" firstStartedPulling="2025-12-11 11:42:49.615422591 +0000 UTC m=+4086.433982170" lastFinishedPulling="2025-12-11 11:42:52.568043896 +0000 UTC m=+4089.386603465" observedRunningTime="2025-12-11 11:42:53.716898058 +0000 UTC m=+4090.535457647" watchObservedRunningTime="2025-12-11 11:42:53.722335951 +0000 UTC m=+4090.540895520"
Dec 11 11:42:57 crc kubenswrapper[5016]: I1211 11:42:57.717644 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:57 crc kubenswrapper[5016]: I1211 11:42:57.718462 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:57 crc kubenswrapper[5016]: I1211 11:42:57.772744 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:57 crc kubenswrapper[5016]: I1211 11:42:57.830157 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:42:58 crc kubenswrapper[5016]: I1211 11:42:58.026618 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bwndd"]
Dec 11 11:42:59 crc kubenswrapper[5016]: I1211 11:42:59.760712 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bwndd" podUID="2571e26c-1c9e-49c8-9ce5-d949bb227f41" containerName="registry-server" containerID="cri-o://633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043" gracePeriod=2
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.770282 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.772891 5016 generic.go:334] "Generic (PLEG): container finished" podID="2571e26c-1c9e-49c8-9ce5-d949bb227f41" containerID="633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043" exitCode=0
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.772932 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwndd" event={"ID":"2571e26c-1c9e-49c8-9ce5-d949bb227f41","Type":"ContainerDied","Data":"633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043"}
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.772981 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bwndd" event={"ID":"2571e26c-1c9e-49c8-9ce5-d949bb227f41","Type":"ContainerDied","Data":"04f2a825df9ea4ce7047f8fe513030d572d2cf0487ec7f24f5168a12e439e53a"}
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.773000 5016 scope.go:117] "RemoveContainer" containerID="633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043"
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.798691 5016 scope.go:117] "RemoveContainer" containerID="cf41595404fe09409651abebe4b165b31e852fc6869c358f20013718ce547084"
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.824207 5016 scope.go:117] "RemoveContainer" containerID="3a0c10b7210ef99e68af9f5f7a47ba3cdf41f0c383ef97a0dbf1c082c487e346"
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.871233 5016 scope.go:117] "RemoveContainer" containerID="633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043"
Dec 11 11:43:00 crc kubenswrapper[5016]: E1211 11:43:00.871710 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043\": container with ID starting with 633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043 not found: ID does not exist" containerID="633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043"
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.871823 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043"} err="failed to get container status \"633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043\": rpc error: code = NotFound desc = could not find container \"633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043\": container with ID starting with 633e1986568fb447ba1e1d74c2da642a1b38f4d240ecf8df8f7f5c69468e8043 not found: ID does not exist"
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.872105 5016 scope.go:117] "RemoveContainer" containerID="cf41595404fe09409651abebe4b165b31e852fc6869c358f20013718ce547084"
Dec 11 11:43:00 crc kubenswrapper[5016]: E1211 11:43:00.872509 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf41595404fe09409651abebe4b165b31e852fc6869c358f20013718ce547084\": container with ID starting with cf41595404fe09409651abebe4b165b31e852fc6869c358f20013718ce547084 not found: ID does not exist" containerID="cf41595404fe09409651abebe4b165b31e852fc6869c358f20013718ce547084"
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.872630 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf41595404fe09409651abebe4b165b31e852fc6869c358f20013718ce547084"} err="failed to get container status \"cf41595404fe09409651abebe4b165b31e852fc6869c358f20013718ce547084\": rpc error: code = NotFound desc = could not find container \"cf41595404fe09409651abebe4b165b31e852fc6869c358f20013718ce547084\": container with ID starting with cf41595404fe09409651abebe4b165b31e852fc6869c358f20013718ce547084 not found: ID does not exist"
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.872717 5016 scope.go:117] "RemoveContainer" containerID="3a0c10b7210ef99e68af9f5f7a47ba3cdf41f0c383ef97a0dbf1c082c487e346"
Dec 11 11:43:00 crc kubenswrapper[5016]: E1211 11:43:00.873243 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a0c10b7210ef99e68af9f5f7a47ba3cdf41f0c383ef97a0dbf1c082c487e346\": container with ID starting with 3a0c10b7210ef99e68af9f5f7a47ba3cdf41f0c383ef97a0dbf1c082c487e346 not found: ID does not exist" containerID="3a0c10b7210ef99e68af9f5f7a47ba3cdf41f0c383ef97a0dbf1c082c487e346"
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.873285 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a0c10b7210ef99e68af9f5f7a47ba3cdf41f0c383ef97a0dbf1c082c487e346"} err="failed to get container status \"3a0c10b7210ef99e68af9f5f7a47ba3cdf41f0c383ef97a0dbf1c082c487e346\": rpc error: code = NotFound desc = could not find container \"3a0c10b7210ef99e68af9f5f7a47ba3cdf41f0c383ef97a0dbf1c082c487e346\": container with ID starting with 3a0c10b7210ef99e68af9f5f7a47ba3cdf41f0c383ef97a0dbf1c082c487e346 not found: ID does not exist"
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.953265 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2571e26c-1c9e-49c8-9ce5-d949bb227f41-catalog-content\") pod \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\" (UID: \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\") "
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.953464 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2571e26c-1c9e-49c8-9ce5-d949bb227f41-utilities\") pod \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\" (UID: \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\") "
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.955383 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2571e26c-1c9e-49c8-9ce5-d949bb227f41-utilities" (OuterVolumeSpecName: "utilities") pod "2571e26c-1c9e-49c8-9ce5-d949bb227f41" (UID: "2571e26c-1c9e-49c8-9ce5-d949bb227f41"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.955506 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlj8k\" (UniqueName: \"kubernetes.io/projected/2571e26c-1c9e-49c8-9ce5-d949bb227f41-kube-api-access-hlj8k\") pod \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\" (UID: \"2571e26c-1c9e-49c8-9ce5-d949bb227f41\") "
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.958494 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2571e26c-1c9e-49c8-9ce5-d949bb227f41-utilities\") on node \"crc\" DevicePath \"\""
Dec 11 11:43:00 crc kubenswrapper[5016]: I1211 11:43:00.964355 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2571e26c-1c9e-49c8-9ce5-d949bb227f41-kube-api-access-hlj8k" (OuterVolumeSpecName: "kube-api-access-hlj8k") pod "2571e26c-1c9e-49c8-9ce5-d949bb227f41" (UID: "2571e26c-1c9e-49c8-9ce5-d949bb227f41"). InnerVolumeSpecName "kube-api-access-hlj8k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:43:01 crc kubenswrapper[5016]: I1211 11:43:01.062340 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlj8k\" (UniqueName: \"kubernetes.io/projected/2571e26c-1c9e-49c8-9ce5-d949bb227f41-kube-api-access-hlj8k\") on node \"crc\" DevicePath \"\""
Dec 11 11:43:01 crc kubenswrapper[5016]: I1211 11:43:01.201273 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2571e26c-1c9e-49c8-9ce5-d949bb227f41-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2571e26c-1c9e-49c8-9ce5-d949bb227f41" (UID: "2571e26c-1c9e-49c8-9ce5-d949bb227f41"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 11:43:01 crc kubenswrapper[5016]: I1211 11:43:01.267076 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2571e26c-1c9e-49c8-9ce5-d949bb227f41-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 11 11:43:01 crc kubenswrapper[5016]: I1211 11:43:01.784062 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bwndd"
Dec 11 11:43:01 crc kubenswrapper[5016]: I1211 11:43:01.814086 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bwndd"]
Dec 11 11:43:01 crc kubenswrapper[5016]: I1211 11:43:01.828673 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bwndd"]
Dec 11 11:43:03 crc kubenswrapper[5016]: I1211 11:43:03.490118 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2571e26c-1c9e-49c8-9ce5-d949bb227f41" path="/var/lib/kubelet/pods/2571e26c-1c9e-49c8-9ce5-d949bb227f41/volumes"
Dec 11 11:43:04 crc kubenswrapper[5016]: I1211 11:43:04.475259 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:43:04 crc kubenswrapper[5016]: E1211 11:43:04.475779 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:43:13 crc kubenswrapper[5016]: I1211 11:43:13.039079 5016 scope.go:117] "RemoveContainer" containerID="8dae247535b83517bf7568847b201f9618870283ccba7ca98bce54b3770e13f7"
Dec 11 11:43:13 crc kubenswrapper[5016]: I1211 11:43:13.065565 5016 scope.go:117] "RemoveContainer" containerID="f49ce6bf40755ad3151fe6c669bb24699599a2dea94483849668ecc99388663c"
Dec 11 11:43:16 crc kubenswrapper[5016]: I1211 11:43:16.475434 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:43:16 crc kubenswrapper[5016]: E1211 11:43:16.476413 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.581433 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zcmgr"]
Dec 11 11:43:29 crc kubenswrapper[5016]: E1211 11:43:29.582596 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2571e26c-1c9e-49c8-9ce5-d949bb227f41" containerName="registry-server"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.582612 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2571e26c-1c9e-49c8-9ce5-d949bb227f41" containerName="registry-server"
Dec 11 11:43:29 crc kubenswrapper[5016]: E1211 11:43:29.582639 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2571e26c-1c9e-49c8-9ce5-d949bb227f41" containerName="extract-utilities"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.582646 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2571e26c-1c9e-49c8-9ce5-d949bb227f41" containerName="extract-utilities"
Dec 11 11:43:29 crc kubenswrapper[5016]: E1211 11:43:29.582678 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2571e26c-1c9e-49c8-9ce5-d949bb227f41" containerName="extract-content"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.582687 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="2571e26c-1c9e-49c8-9ce5-d949bb227f41" containerName="extract-content"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.582906 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="2571e26c-1c9e-49c8-9ce5-d949bb227f41" containerName="registry-server"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.584614 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.600219 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zcmgr"]
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.732011 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jcbw\" (UniqueName: \"kubernetes.io/projected/4ce5f6af-3cb9-440b-956f-527b2df2254d-kube-api-access-9jcbw\") pod \"certified-operators-zcmgr\" (UID: \"4ce5f6af-3cb9-440b-956f-527b2df2254d\") " pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.732511 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ce5f6af-3cb9-440b-956f-527b2df2254d-utilities\") pod \"certified-operators-zcmgr\" (UID: \"4ce5f6af-3cb9-440b-956f-527b2df2254d\") " pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.732632 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ce5f6af-3cb9-440b-956f-527b2df2254d-catalog-content\") pod \"certified-operators-zcmgr\" (UID: \"4ce5f6af-3cb9-440b-956f-527b2df2254d\") " pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.834545 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jcbw\" (UniqueName: \"kubernetes.io/projected/4ce5f6af-3cb9-440b-956f-527b2df2254d-kube-api-access-9jcbw\") pod \"certified-operators-zcmgr\" (UID: \"4ce5f6af-3cb9-440b-956f-527b2df2254d\") " pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.834606 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ce5f6af-3cb9-440b-956f-527b2df2254d-utilities\") pod \"certified-operators-zcmgr\" (UID: \"4ce5f6af-3cb9-440b-956f-527b2df2254d\") " pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.834630 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ce5f6af-3cb9-440b-956f-527b2df2254d-catalog-content\") pod \"certified-operators-zcmgr\" (UID: \"4ce5f6af-3cb9-440b-956f-527b2df2254d\") " pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.835630 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ce5f6af-3cb9-440b-956f-527b2df2254d-catalog-content\") pod \"certified-operators-zcmgr\" (UID: \"4ce5f6af-3cb9-440b-956f-527b2df2254d\") " pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.836026 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ce5f6af-3cb9-440b-956f-527b2df2254d-utilities\") pod \"certified-operators-zcmgr\" (UID: \"4ce5f6af-3cb9-440b-956f-527b2df2254d\") " pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.873010 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jcbw\" (UniqueName: \"kubernetes.io/projected/4ce5f6af-3cb9-440b-956f-527b2df2254d-kube-api-access-9jcbw\") pod \"certified-operators-zcmgr\" (UID: \"4ce5f6af-3cb9-440b-956f-527b2df2254d\") " pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:29 crc kubenswrapper[5016]: I1211 11:43:29.961838 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:30 crc kubenswrapper[5016]: I1211 11:43:30.475189 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:43:30 crc kubenswrapper[5016]: E1211 11:43:30.475553 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:43:30 crc kubenswrapper[5016]: I1211 11:43:30.538970 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zcmgr"]
Dec 11 11:43:31 crc kubenswrapper[5016]: I1211 11:43:31.090890 5016 generic.go:334] "Generic (PLEG): container finished" podID="4ce5f6af-3cb9-440b-956f-527b2df2254d" containerID="871078c9fbb8a89ad6faeda2eff4dca9d12d74fea9433932420150c22fbee711" exitCode=0
Dec 11 11:43:31 crc kubenswrapper[5016]: I1211 11:43:31.091446 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcmgr" event={"ID":"4ce5f6af-3cb9-440b-956f-527b2df2254d","Type":"ContainerDied","Data":"871078c9fbb8a89ad6faeda2eff4dca9d12d74fea9433932420150c22fbee711"}
Dec 11 11:43:31 crc kubenswrapper[5016]: I1211 11:43:31.091614 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcmgr" event={"ID":"4ce5f6af-3cb9-440b-956f-527b2df2254d","Type":"ContainerStarted","Data":"55a27eb838ee4285cd68f6ce2943041fe88e6e39aa33987449c26daecb57d9fd"}
Dec 11 11:43:32 crc kubenswrapper[5016]: I1211 11:43:32.107147 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcmgr" event={"ID":"4ce5f6af-3cb9-440b-956f-527b2df2254d","Type":"ContainerStarted","Data":"5ecbe067c839242d4439f60f333ce36291e9648f72ee82beb024bb1c15cd6da5"}
Dec 11 11:43:33 crc kubenswrapper[5016]: I1211 11:43:33.120743 5016 generic.go:334] "Generic (PLEG): container finished" podID="4ce5f6af-3cb9-440b-956f-527b2df2254d" containerID="5ecbe067c839242d4439f60f333ce36291e9648f72ee82beb024bb1c15cd6da5" exitCode=0
Dec 11 11:43:33 crc kubenswrapper[5016]: I1211 11:43:33.120896 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcmgr" event={"ID":"4ce5f6af-3cb9-440b-956f-527b2df2254d","Type":"ContainerDied","Data":"5ecbe067c839242d4439f60f333ce36291e9648f72ee82beb024bb1c15cd6da5"}
Dec 11 11:43:34 crc kubenswrapper[5016]: I1211 11:43:34.136067 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcmgr" event={"ID":"4ce5f6af-3cb9-440b-956f-527b2df2254d","Type":"ContainerStarted","Data":"533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896"}
Dec 11 11:43:34 crc kubenswrapper[5016]: I1211 11:43:34.164829 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zcmgr" podStartSLOduration=2.387442728 podStartE2EDuration="5.164794222s" podCreationTimestamp="2025-12-11 11:43:29 +0000 UTC" firstStartedPulling="2025-12-11 11:43:31.096367578 +0000 UTC m=+4127.914927157" lastFinishedPulling="2025-12-11 11:43:33.873719052 +0000 UTC m=+4130.692278651" observedRunningTime="2025-12-11 11:43:34.156714673 +0000 UTC m=+4130.975274272" watchObservedRunningTime="2025-12-11 11:43:34.164794222 +0000 UTC m=+4130.983353821"
Dec 11 11:43:39 crc kubenswrapper[5016]: I1211 11:43:39.962166 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:39 crc kubenswrapper[5016]: I1211 11:43:39.963344 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:40 crc kubenswrapper[5016]: I1211 11:43:40.028206 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:40 crc kubenswrapper[5016]: I1211 11:43:40.262215 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:40 crc kubenswrapper[5016]: I1211 11:43:40.345053 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zcmgr"]
Dec 11 11:43:42 crc kubenswrapper[5016]: I1211 11:43:42.219761 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zcmgr" podUID="4ce5f6af-3cb9-440b-956f-527b2df2254d" containerName="registry-server" containerID="cri-o://533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896" gracePeriod=2
Dec 11 11:43:42 crc kubenswrapper[5016]: I1211 11:43:42.742384 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zcmgr"
Dec 11 11:43:42 crc kubenswrapper[5016]: I1211 11:43:42.868457 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ce5f6af-3cb9-440b-956f-527b2df2254d-catalog-content\") pod \"4ce5f6af-3cb9-440b-956f-527b2df2254d\" (UID: \"4ce5f6af-3cb9-440b-956f-527b2df2254d\") "
Dec 11 11:43:42 crc kubenswrapper[5016]: I1211 11:43:42.868768 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jcbw\" (UniqueName: \"kubernetes.io/projected/4ce5f6af-3cb9-440b-956f-527b2df2254d-kube-api-access-9jcbw\") pod \"4ce5f6af-3cb9-440b-956f-527b2df2254d\" (UID: \"4ce5f6af-3cb9-440b-956f-527b2df2254d\") "
Dec 11 11:43:42 crc kubenswrapper[5016]: I1211 11:43:42.868853 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ce5f6af-3cb9-440b-956f-527b2df2254d-utilities\") pod \"4ce5f6af-3cb9-440b-956f-527b2df2254d\" (UID: \"4ce5f6af-3cb9-440b-956f-527b2df2254d\") "
Dec 11 11:43:42 crc kubenswrapper[5016]: I1211 11:43:42.869836 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ce5f6af-3cb9-440b-956f-527b2df2254d-utilities" (OuterVolumeSpecName: "utilities") pod "4ce5f6af-3cb9-440b-956f-527b2df2254d" (UID: "4ce5f6af-3cb9-440b-956f-527b2df2254d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 11 11:43:42 crc kubenswrapper[5016]: I1211 11:43:42.877739 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ce5f6af-3cb9-440b-956f-527b2df2254d-kube-api-access-9jcbw" (OuterVolumeSpecName: "kube-api-access-9jcbw") pod "4ce5f6af-3cb9-440b-956f-527b2df2254d" (UID: "4ce5f6af-3cb9-440b-956f-527b2df2254d"). InnerVolumeSpecName "kube-api-access-9jcbw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:43:42 crc kubenswrapper[5016]: I1211 11:43:42.944165 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ce5f6af-3cb9-440b-956f-527b2df2254d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ce5f6af-3cb9-440b-956f-527b2df2254d" (UID: "4ce5f6af-3cb9-440b-956f-527b2df2254d"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:43:42 crc kubenswrapper[5016]: I1211 11:43:42.972283 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jcbw\" (UniqueName: \"kubernetes.io/projected/4ce5f6af-3cb9-440b-956f-527b2df2254d-kube-api-access-9jcbw\") on node \"crc\" DevicePath \"\"" Dec 11 11:43:42 crc kubenswrapper[5016]: I1211 11:43:42.972354 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ce5f6af-3cb9-440b-956f-527b2df2254d-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:43:42 crc kubenswrapper[5016]: I1211 11:43:42.972366 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ce5f6af-3cb9-440b-956f-527b2df2254d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.232537 5016 generic.go:334] "Generic (PLEG): container finished" podID="4ce5f6af-3cb9-440b-956f-527b2df2254d" containerID="533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896" exitCode=0 Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.232591 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcmgr" event={"ID":"4ce5f6af-3cb9-440b-956f-527b2df2254d","Type":"ContainerDied","Data":"533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896"} Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.232627 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcmgr" event={"ID":"4ce5f6af-3cb9-440b-956f-527b2df2254d","Type":"ContainerDied","Data":"55a27eb838ee4285cd68f6ce2943041fe88e6e39aa33987449c26daecb57d9fd"} Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.232640 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zcmgr" Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.232647 5016 scope.go:117] "RemoveContainer" containerID="533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896" Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.255698 5016 scope.go:117] "RemoveContainer" containerID="5ecbe067c839242d4439f60f333ce36291e9648f72ee82beb024bb1c15cd6da5" Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.285894 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zcmgr"] Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.298345 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zcmgr"] Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.300732 5016 scope.go:117] "RemoveContainer" containerID="871078c9fbb8a89ad6faeda2eff4dca9d12d74fea9433932420150c22fbee711" Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.330734 5016 scope.go:117] "RemoveContainer" containerID="533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896" Dec 11 11:43:43 crc kubenswrapper[5016]: E1211 11:43:43.333851 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896\": container with ID starting with 533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896 not found: ID does not exist" containerID="533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896" Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.333904 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896"} err="failed to get container status \"533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896\": rpc error: code = NotFound desc = could not find container \"533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896\": container with ID starting with 533925e39d84b29ec6caf8f8e656b9db355e414927f5237546e9cfa668c33896 not found: ID does not exist" Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.333965 5016 scope.go:117] "RemoveContainer" containerID="5ecbe067c839242d4439f60f333ce36291e9648f72ee82beb024bb1c15cd6da5" Dec 11 11:43:43 crc kubenswrapper[5016]: E1211 11:43:43.334357 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ecbe067c839242d4439f60f333ce36291e9648f72ee82beb024bb1c15cd6da5\": container with ID starting with 5ecbe067c839242d4439f60f333ce36291e9648f72ee82beb024bb1c15cd6da5 not found: ID does not exist" containerID="5ecbe067c839242d4439f60f333ce36291e9648f72ee82beb024bb1c15cd6da5" Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.334406 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ecbe067c839242d4439f60f333ce36291e9648f72ee82beb024bb1c15cd6da5"} err="failed to get container status \"5ecbe067c839242d4439f60f333ce36291e9648f72ee82beb024bb1c15cd6da5\": rpc error: code = NotFound desc = could not find container \"5ecbe067c839242d4439f60f333ce36291e9648f72ee82beb024bb1c15cd6da5\": container with ID starting with 5ecbe067c839242d4439f60f333ce36291e9648f72ee82beb024bb1c15cd6da5 not found: ID does not exist" Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.334434 5016 scope.go:117] "RemoveContainer" 
Dec 11 11:43:43 crc kubenswrapper[5016]: E1211 11:43:43.334922 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"871078c9fbb8a89ad6faeda2eff4dca9d12d74fea9433932420150c22fbee711\": container with ID starting with 871078c9fbb8a89ad6faeda2eff4dca9d12d74fea9433932420150c22fbee711 not found: ID does not exist" containerID="871078c9fbb8a89ad6faeda2eff4dca9d12d74fea9433932420150c22fbee711"
Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.335051 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"871078c9fbb8a89ad6faeda2eff4dca9d12d74fea9433932420150c22fbee711"} err="failed to get container status \"871078c9fbb8a89ad6faeda2eff4dca9d12d74fea9433932420150c22fbee711\": rpc error: code = NotFound desc = could not find container \"871078c9fbb8a89ad6faeda2eff4dca9d12d74fea9433932420150c22fbee711\": container with ID starting with 871078c9fbb8a89ad6faeda2eff4dca9d12d74fea9433932420150c22fbee711 not found: ID does not exist"
Dec 11 11:43:43 crc kubenswrapper[5016]: I1211 11:43:43.486109 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ce5f6af-3cb9-440b-956f-527b2df2254d" path="/var/lib/kubelet/pods/4ce5f6af-3cb9-440b-956f-527b2df2254d/volumes"
Dec 11 11:43:45 crc kubenswrapper[5016]: I1211 11:43:45.474619 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:43:45 crc kubenswrapper[5016]: E1211 11:43:45.475241 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:43:56 crc kubenswrapper[5016]: I1211 11:43:56.475230 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:43:56 crc kubenswrapper[5016]: E1211 11:43:56.476470 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:44:10 crc kubenswrapper[5016]: I1211 11:44:10.474634 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:44:10 crc kubenswrapper[5016]: E1211 11:44:10.475537 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508"
Dec 11 11:44:13 crc kubenswrapper[5016]: I1211 11:44:13.211922 5016 scope.go:117] "RemoveContainer" containerID="72f14fc6c0d16fe6532242cbc5e326a8f33cb3863bc3221ab74d07c1c6931703"
Dec 11 11:44:13 crc kubenswrapper[5016]: I1211 11:44:13.259046 5016 scope.go:117] "RemoveContainer" containerID="0ec9d53a6207b622ec90faaa2ecfa03d48e4c8f71d08923d71c95dbc3fc3875b"
Dec 11 11:44:25 crc kubenswrapper[5016]: I1211 11:44:25.475478 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03"
Dec 11 11:44:26 crc kubenswrapper[5016]: I1211 11:44:26.758327 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"1ef2b4beefe05b953228fbd85b170d6d5a71b88779b80bfcc5c1cc7e2f4425ef"}
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.195883 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"]
Dec 11 11:45:00 crc kubenswrapper[5016]: E1211 11:45:00.197211 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ce5f6af-3cb9-440b-956f-527b2df2254d" containerName="extract-utilities"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.197230 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ce5f6af-3cb9-440b-956f-527b2df2254d" containerName="extract-utilities"
Dec 11 11:45:00 crc kubenswrapper[5016]: E1211 11:45:00.197276 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ce5f6af-3cb9-440b-956f-527b2df2254d" containerName="extract-content"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.197283 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ce5f6af-3cb9-440b-956f-527b2df2254d" containerName="extract-content"
Dec 11 11:45:00 crc kubenswrapper[5016]: E1211 11:45:00.197298 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ce5f6af-3cb9-440b-956f-527b2df2254d" containerName="registry-server"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.197304 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ce5f6af-3cb9-440b-956f-527b2df2254d" containerName="registry-server"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.197553 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ce5f6af-3cb9-440b-956f-527b2df2254d" containerName="registry-server"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.198479 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.202069 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.203526 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.209269 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"]
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.224301 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/16c79257-d297-4de2-b30a-6217ab378d5d-config-volume\") pod \"collect-profiles-29424225-xqm9d\" (UID: \"16c79257-d297-4de2-b30a-6217ab378d5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.224419 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/16c79257-d297-4de2-b30a-6217ab378d5d-secret-volume\") pod \"collect-profiles-29424225-xqm9d\" (UID: \"16c79257-d297-4de2-b30a-6217ab378d5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.224732 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt5fj\" (UniqueName: \"kubernetes.io/projected/16c79257-d297-4de2-b30a-6217ab378d5d-kube-api-access-mt5fj\") pod \"collect-profiles-29424225-xqm9d\" (UID: \"16c79257-d297-4de2-b30a-6217ab378d5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.327749 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/16c79257-d297-4de2-b30a-6217ab378d5d-config-volume\") pod \"collect-profiles-29424225-xqm9d\" (UID: \"16c79257-d297-4de2-b30a-6217ab378d5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.327814 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/16c79257-d297-4de2-b30a-6217ab378d5d-secret-volume\") pod \"collect-profiles-29424225-xqm9d\" (UID: \"16c79257-d297-4de2-b30a-6217ab378d5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.327922 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt5fj\" (UniqueName: \"kubernetes.io/projected/16c79257-d297-4de2-b30a-6217ab378d5d-kube-api-access-mt5fj\") pod \"collect-profiles-29424225-xqm9d\" (UID: \"16c79257-d297-4de2-b30a-6217ab378d5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.329562 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/16c79257-d297-4de2-b30a-6217ab378d5d-config-volume\") pod \"collect-profiles-29424225-xqm9d\" (UID: \"16c79257-d297-4de2-b30a-6217ab378d5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.336285 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/16c79257-d297-4de2-b30a-6217ab378d5d-secret-volume\") pod \"collect-profiles-29424225-xqm9d\" (UID: \"16c79257-d297-4de2-b30a-6217ab378d5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.345342 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mt5fj\" (UniqueName: \"kubernetes.io/projected/16c79257-d297-4de2-b30a-6217ab378d5d-kube-api-access-mt5fj\") pod \"collect-profiles-29424225-xqm9d\" (UID: \"16c79257-d297-4de2-b30a-6217ab378d5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:00 crc kubenswrapper[5016]: I1211 11:45:00.529265 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:01 crc kubenswrapper[5016]: I1211 11:45:01.016586 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"]
Dec 11 11:45:02 crc kubenswrapper[5016]: I1211 11:45:02.188667 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d" event={"ID":"16c79257-d297-4de2-b30a-6217ab378d5d","Type":"ContainerStarted","Data":"0f4febca8f560364457b21653265edf8af09e9dc3f64f820a61069c622241cee"}
Dec 11 11:45:02 crc kubenswrapper[5016]: I1211 11:45:02.189284 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d" event={"ID":"16c79257-d297-4de2-b30a-6217ab378d5d","Type":"ContainerStarted","Data":"3af105a61f12303d5e462adb4dc2498902cd0684d2f8ab76b8c0c34342707633"}
Dec 11 11:45:02 crc kubenswrapper[5016]: I1211 11:45:02.211492 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d" podStartSLOduration=2.211461377 podStartE2EDuration="2.211461377s" podCreationTimestamp="2025-12-11 11:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 11:45:02.209324235 +0000 UTC m=+4219.027883824" watchObservedRunningTime="2025-12-11 11:45:02.211461377 +0000 UTC m=+4219.030020966"
Dec 11 11:45:02 crc kubenswrapper[5016]: E1211 11:45:02.577595 5016 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod16c79257_d297_4de2_b30a_6217ab378d5d.slice/crio-conmon-0f4febca8f560364457b21653265edf8af09e9dc3f64f820a61069c622241cee.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod16c79257_d297_4de2_b30a_6217ab378d5d.slice/crio-0f4febca8f560364457b21653265edf8af09e9dc3f64f820a61069c622241cee.scope\": RecentStats: unable to find data in memory cache]"
Dec 11 11:45:03 crc kubenswrapper[5016]: I1211 11:45:03.201025 5016 generic.go:334] "Generic (PLEG): container finished" podID="16c79257-d297-4de2-b30a-6217ab378d5d" containerID="0f4febca8f560364457b21653265edf8af09e9dc3f64f820a61069c622241cee" exitCode=0
Dec 11 11:45:03 crc kubenswrapper[5016]: I1211 11:45:03.201116 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d" event={"ID":"16c79257-d297-4de2-b30a-6217ab378d5d","Type":"ContainerDied","Data":"0f4febca8f560364457b21653265edf8af09e9dc3f64f820a61069c622241cee"}
Dec 11 11:45:04 crc kubenswrapper[5016]: I1211 11:45:04.693631 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:04 crc kubenswrapper[5016]: I1211 11:45:04.843163 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/16c79257-d297-4de2-b30a-6217ab378d5d-config-volume\") pod \"16c79257-d297-4de2-b30a-6217ab378d5d\" (UID: \"16c79257-d297-4de2-b30a-6217ab378d5d\") "
Dec 11 11:45:04 crc kubenswrapper[5016]: I1211 11:45:04.843251 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mt5fj\" (UniqueName: \"kubernetes.io/projected/16c79257-d297-4de2-b30a-6217ab378d5d-kube-api-access-mt5fj\") pod \"16c79257-d297-4de2-b30a-6217ab378d5d\" (UID: \"16c79257-d297-4de2-b30a-6217ab378d5d\") "
Dec 11 11:45:04 crc kubenswrapper[5016]: I1211 11:45:04.843614 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/16c79257-d297-4de2-b30a-6217ab378d5d-secret-volume\") pod \"16c79257-d297-4de2-b30a-6217ab378d5d\" (UID: \"16c79257-d297-4de2-b30a-6217ab378d5d\") "
Dec 11 11:45:04 crc kubenswrapper[5016]: I1211 11:45:04.844541 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16c79257-d297-4de2-b30a-6217ab378d5d-config-volume" (OuterVolumeSpecName: "config-volume") pod "16c79257-d297-4de2-b30a-6217ab378d5d" (UID: "16c79257-d297-4de2-b30a-6217ab378d5d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 11 11:45:04 crc kubenswrapper[5016]: I1211 11:45:04.851159 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16c79257-d297-4de2-b30a-6217ab378d5d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "16c79257-d297-4de2-b30a-6217ab378d5d" (UID: "16c79257-d297-4de2-b30a-6217ab378d5d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 11 11:45:04 crc kubenswrapper[5016]: I1211 11:45:04.851670 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16c79257-d297-4de2-b30a-6217ab378d5d-kube-api-access-mt5fj" (OuterVolumeSpecName: "kube-api-access-mt5fj") pod "16c79257-d297-4de2-b30a-6217ab378d5d" (UID: "16c79257-d297-4de2-b30a-6217ab378d5d"). InnerVolumeSpecName "kube-api-access-mt5fj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:45:04 crc kubenswrapper[5016]: I1211 11:45:04.946301 5016 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/16c79257-d297-4de2-b30a-6217ab378d5d-secret-volume\") on node \"crc\" DevicePath \"\""
Dec 11 11:45:04 crc kubenswrapper[5016]: I1211 11:45:04.946365 5016 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/16c79257-d297-4de2-b30a-6217ab378d5d-config-volume\") on node \"crc\" DevicePath \"\""
Dec 11 11:45:04 crc kubenswrapper[5016]: I1211 11:45:04.946383 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mt5fj\" (UniqueName: \"kubernetes.io/projected/16c79257-d297-4de2-b30a-6217ab378d5d-kube-api-access-mt5fj\") on node \"crc\" DevicePath \"\""
Dec 11 11:45:05 crc kubenswrapper[5016]: I1211 11:45:05.225507 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d" event={"ID":"16c79257-d297-4de2-b30a-6217ab378d5d","Type":"ContainerDied","Data":"3af105a61f12303d5e462adb4dc2498902cd0684d2f8ab76b8c0c34342707633"}
Dec 11 11:45:05 crc kubenswrapper[5016]: I1211 11:45:05.225576 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3af105a61f12303d5e462adb4dc2498902cd0684d2f8ab76b8c0c34342707633"
Dec 11 11:45:05 crc kubenswrapper[5016]: I1211 11:45:05.225607 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29424225-xqm9d"
Dec 11 11:45:05 crc kubenswrapper[5016]: I1211 11:45:05.318816 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp"]
Dec 11 11:45:05 crc kubenswrapper[5016]: I1211 11:45:05.328612 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29424180-slbkp"]
Dec 11 11:45:05 crc kubenswrapper[5016]: I1211 11:45:05.494676 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1172fa02-d3bf-4eb3-96f0-f9f224625c46" path="/var/lib/kubelet/pods/1172fa02-d3bf-4eb3-96f0-f9f224625c46/volumes"
Dec 11 11:45:13 crc kubenswrapper[5016]: I1211 11:45:13.398926 5016 scope.go:117] "RemoveContainer" containerID="03965dfb551484cc6504c58ac9408103b6430b3bfebe9cb08712f3ecee1cbccc"
Dec 11 11:45:13 crc kubenswrapper[5016]: I1211 11:45:13.424749 5016 scope.go:117] "RemoveContainer" containerID="2ae03d4b556f6469d86694bf9124fc366d661bc630fa22ba3058509a2a9b3e92"
Dec 11 11:45:48 crc kubenswrapper[5016]: I1211 11:45:48.774457 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zbtll/must-gather-56hjr"]
Dec 11 11:45:48 crc kubenswrapper[5016]: E1211 11:45:48.776048 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16c79257-d297-4de2-b30a-6217ab378d5d" containerName="collect-profiles"
Dec 11 11:45:48 crc kubenswrapper[5016]: I1211 11:45:48.776069 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="16c79257-d297-4de2-b30a-6217ab378d5d" containerName="collect-profiles"
Dec 11 11:45:48 crc kubenswrapper[5016]: I1211 11:45:48.776381 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="16c79257-d297-4de2-b30a-6217ab378d5d" containerName="collect-profiles"
Dec 11 11:45:48 crc kubenswrapper[5016]: I1211 11:45:48.780340 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/must-gather-56hjr"
Dec 11 11:45:48 crc kubenswrapper[5016]: I1211 11:45:48.789894 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-zbtll"/"openshift-service-ca.crt"
Dec 11 11:45:48 crc kubenswrapper[5016]: I1211 11:45:48.789918 5016 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-zbtll"/"kube-root-ca.crt"
Dec 11 11:45:48 crc kubenswrapper[5016]: I1211 11:45:48.811138 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-zbtll/must-gather-56hjr"]
Dec 11 11:45:48 crc kubenswrapper[5016]: I1211 11:45:48.957869 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/40426f6c-f429-48d5-be22-60e0c4009823-must-gather-output\") pod \"must-gather-56hjr\" (UID: \"40426f6c-f429-48d5-be22-60e0c4009823\") " pod="openshift-must-gather-zbtll/must-gather-56hjr"
Dec 11 11:45:48 crc kubenswrapper[5016]: I1211 11:45:48.957982 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdd92\" (UniqueName: \"kubernetes.io/projected/40426f6c-f429-48d5-be22-60e0c4009823-kube-api-access-rdd92\") pod \"must-gather-56hjr\" (UID: \"40426f6c-f429-48d5-be22-60e0c4009823\") " pod="openshift-must-gather-zbtll/must-gather-56hjr"
Dec 11 11:45:49 crc kubenswrapper[5016]: I1211 11:45:49.060244 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/40426f6c-f429-48d5-be22-60e0c4009823-must-gather-output\") pod \"must-gather-56hjr\" (UID: \"40426f6c-f429-48d5-be22-60e0c4009823\") " pod="openshift-must-gather-zbtll/must-gather-56hjr"
Dec 11 11:45:49 crc kubenswrapper[5016]: I1211 11:45:49.060401 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdd92\" (UniqueName: \"kubernetes.io/projected/40426f6c-f429-48d5-be22-60e0c4009823-kube-api-access-rdd92\") pod \"must-gather-56hjr\" (UID: \"40426f6c-f429-48d5-be22-60e0c4009823\") " pod="openshift-must-gather-zbtll/must-gather-56hjr"
Dec 11 11:45:49 crc kubenswrapper[5016]: I1211 11:45:49.060852 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/40426f6c-f429-48d5-be22-60e0c4009823-must-gather-output\") pod \"must-gather-56hjr\" (UID: \"40426f6c-f429-48d5-be22-60e0c4009823\") " pod="openshift-must-gather-zbtll/must-gather-56hjr"
Dec 11 11:45:49 crc kubenswrapper[5016]: I1211 11:45:49.091047 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdd92\" (UniqueName: \"kubernetes.io/projected/40426f6c-f429-48d5-be22-60e0c4009823-kube-api-access-rdd92\") pod \"must-gather-56hjr\" (UID: \"40426f6c-f429-48d5-be22-60e0c4009823\") " pod="openshift-must-gather-zbtll/must-gather-56hjr"
Dec 11 11:45:49 crc kubenswrapper[5016]: I1211 11:45:49.108872 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/must-gather-56hjr"
Dec 11 11:45:49 crc kubenswrapper[5016]: I1211 11:45:49.733061 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-zbtll/must-gather-56hjr"]
Dec 11 11:45:49 crc kubenswrapper[5016]: W1211 11:45:49.745602 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40426f6c_f429_48d5_be22_60e0c4009823.slice/crio-a5be08a3ab11b72a746e006c6d02227f70839cd82c9512c715c9fa305fe01a6c WatchSource:0}: Error finding container a5be08a3ab11b72a746e006c6d02227f70839cd82c9512c715c9fa305fe01a6c: Status 404 returned error can't find the container with id a5be08a3ab11b72a746e006c6d02227f70839cd82c9512c715c9fa305fe01a6c
Dec 11 11:45:50 crc kubenswrapper[5016]: I1211 11:45:50.742727 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zbtll/must-gather-56hjr" event={"ID":"40426f6c-f429-48d5-be22-60e0c4009823","Type":"ContainerStarted","Data":"d44abe53a9bdbd50dc938dbbd585fd45700625d66d9f31d6e9a7d834c587843e"}
Dec 11 11:45:50 crc kubenswrapper[5016]: I1211 11:45:50.743218 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zbtll/must-gather-56hjr" event={"ID":"40426f6c-f429-48d5-be22-60e0c4009823","Type":"ContainerStarted","Data":"413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f"}
Dec 11 11:45:50 crc kubenswrapper[5016]: I1211 11:45:50.743237 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zbtll/must-gather-56hjr" event={"ID":"40426f6c-f429-48d5-be22-60e0c4009823","Type":"ContainerStarted","Data":"a5be08a3ab11b72a746e006c6d02227f70839cd82c9512c715c9fa305fe01a6c"}
Dec 11 11:45:50 crc kubenswrapper[5016]: I1211 11:45:50.771067 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-zbtll/must-gather-56hjr" podStartSLOduration=2.771044988 podStartE2EDuration="2.771044988s" podCreationTimestamp="2025-12-11 11:45:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 11:45:50.762631481 +0000 UTC m=+4267.581191060" watchObservedRunningTime="2025-12-11 11:45:50.771044988 +0000 UTC m=+4267.589604567"
Dec 11 11:45:54 crc kubenswrapper[5016]: I1211 11:45:54.790345 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zbtll/crc-debug-84jh5"]
Dec 11 11:45:54 crc kubenswrapper[5016]: I1211 11:45:54.792861 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/crc-debug-84jh5"
Dec 11 11:45:54 crc kubenswrapper[5016]: I1211 11:45:54.795457 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-zbtll"/"default-dockercfg-xffmk"
Dec 11 11:45:54 crc kubenswrapper[5016]: I1211 11:45:54.932548 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4pn7\" (UniqueName: \"kubernetes.io/projected/9738eef4-843f-4685-b55b-22e54a47e138-kube-api-access-z4pn7\") pod \"crc-debug-84jh5\" (UID: \"9738eef4-843f-4685-b55b-22e54a47e138\") " pod="openshift-must-gather-zbtll/crc-debug-84jh5"
Dec 11 11:45:54 crc kubenswrapper[5016]: I1211 11:45:54.932687 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9738eef4-843f-4685-b55b-22e54a47e138-host\") pod \"crc-debug-84jh5\" (UID: \"9738eef4-843f-4685-b55b-22e54a47e138\") " pod="openshift-must-gather-zbtll/crc-debug-84jh5"
Dec 11 11:45:55 crc kubenswrapper[5016]: I1211 11:45:55.034575 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9738eef4-843f-4685-b55b-22e54a47e138-host\") pod \"crc-debug-84jh5\" (UID: \"9738eef4-843f-4685-b55b-22e54a47e138\") " pod="openshift-must-gather-zbtll/crc-debug-84jh5"
Dec 11 11:45:55 crc kubenswrapper[5016]: I1211 11:45:55.034709 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9738eef4-843f-4685-b55b-22e54a47e138-host\") pod \"crc-debug-84jh5\" (UID: \"9738eef4-843f-4685-b55b-22e54a47e138\") " pod="openshift-must-gather-zbtll/crc-debug-84jh5"
Dec 11 11:45:55 crc kubenswrapper[5016]: I1211 11:45:55.034718 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4pn7\" (UniqueName: \"kubernetes.io/projected/9738eef4-843f-4685-b55b-22e54a47e138-kube-api-access-z4pn7\") pod \"crc-debug-84jh5\" (UID: \"9738eef4-843f-4685-b55b-22e54a47e138\") " pod="openshift-must-gather-zbtll/crc-debug-84jh5"
Dec 11 11:45:55 crc kubenswrapper[5016]: I1211 11:45:55.663669 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4pn7\" (UniqueName: \"kubernetes.io/projected/9738eef4-843f-4685-b55b-22e54a47e138-kube-api-access-z4pn7\") pod \"crc-debug-84jh5\" (UID: \"9738eef4-843f-4685-b55b-22e54a47e138\") " pod="openshift-must-gather-zbtll/crc-debug-84jh5"
Dec 11 11:45:55 crc kubenswrapper[5016]: I1211 11:45:55.716122 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/crc-debug-84jh5"
Dec 11 11:45:55 crc kubenswrapper[5016]: W1211 11:45:55.829447 5016 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9738eef4_843f_4685_b55b_22e54a47e138.slice/crio-eb06ca11f636da70b6c3e0980a46334da6fc2d506c6c2eb1cfef3b141de01fbb WatchSource:0}: Error finding container eb06ca11f636da70b6c3e0980a46334da6fc2d506c6c2eb1cfef3b141de01fbb: Status 404 returned error can't find the container with id eb06ca11f636da70b6c3e0980a46334da6fc2d506c6c2eb1cfef3b141de01fbb
Dec 11 11:45:56 crc kubenswrapper[5016]: I1211 11:45:56.818165 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zbtll/crc-debug-84jh5" event={"ID":"9738eef4-843f-4685-b55b-22e54a47e138","Type":"ContainerStarted","Data":"4f9a3e39f32faf8b4999b04eac689e6601497dc53506b91d90a7366e6a72c554"}
Dec 11 11:45:56 crc kubenswrapper[5016]: I1211 11:45:56.819779 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zbtll/crc-debug-84jh5" event={"ID":"9738eef4-843f-4685-b55b-22e54a47e138","Type":"ContainerStarted","Data":"eb06ca11f636da70b6c3e0980a46334da6fc2d506c6c2eb1cfef3b141de01fbb"}
Dec 11 11:45:56 crc kubenswrapper[5016]: I1211 11:45:56.843118 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-zbtll/crc-debug-84jh5" podStartSLOduration=2.843086851 podStartE2EDuration="2.843086851s" podCreationTimestamp="2025-12-11 11:45:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-11 11:45:56.836252252 +0000 UTC m=+4273.654811851" watchObservedRunningTime="2025-12-11 11:45:56.843086851 +0000 UTC m=+4273.661646430"
Dec 11 11:46:37 crc kubenswrapper[5016]: I1211 11:46:37.235071 5016 generic.go:334] "Generic (PLEG): container finished" podID="9738eef4-843f-4685-b55b-22e54a47e138" containerID="4f9a3e39f32faf8b4999b04eac689e6601497dc53506b91d90a7366e6a72c554" exitCode=0
Dec 11 11:46:37 crc kubenswrapper[5016]: I1211 11:46:37.235175 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zbtll/crc-debug-84jh5" event={"ID":"9738eef4-843f-4685-b55b-22e54a47e138","Type":"ContainerDied","Data":"4f9a3e39f32faf8b4999b04eac689e6601497dc53506b91d90a7366e6a72c554"}
Dec 11 11:46:38 crc kubenswrapper[5016]: I1211 11:46:38.372691 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/crc-debug-84jh5"
Dec 11 11:46:38 crc kubenswrapper[5016]: I1211 11:46:38.413344 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zbtll/crc-debug-84jh5"]
Dec 11 11:46:38 crc kubenswrapper[5016]: I1211 11:46:38.422183 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zbtll/crc-debug-84jh5"]
Dec 11 11:46:38 crc kubenswrapper[5016]: I1211 11:46:38.465788 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4pn7\" (UniqueName: \"kubernetes.io/projected/9738eef4-843f-4685-b55b-22e54a47e138-kube-api-access-z4pn7\") pod \"9738eef4-843f-4685-b55b-22e54a47e138\" (UID: \"9738eef4-843f-4685-b55b-22e54a47e138\") "
Dec 11 11:46:38 crc kubenswrapper[5016]: I1211 11:46:38.466076 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9738eef4-843f-4685-b55b-22e54a47e138-host\") pod \"9738eef4-843f-4685-b55b-22e54a47e138\" (UID: \"9738eef4-843f-4685-b55b-22e54a47e138\") "
Dec 11 11:46:38 crc kubenswrapper[5016]: I1211 11:46:38.466487 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9738eef4-843f-4685-b55b-22e54a47e138-host" (OuterVolumeSpecName: "host") pod "9738eef4-843f-4685-b55b-22e54a47e138" (UID: "9738eef4-843f-4685-b55b-22e54a47e138"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 11:46:38 crc kubenswrapper[5016]: I1211 11:46:38.466783 5016 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9738eef4-843f-4685-b55b-22e54a47e138-host\") on node \"crc\" DevicePath \"\""
Dec 11 11:46:38 crc kubenswrapper[5016]: I1211 11:46:38.475320 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9738eef4-843f-4685-b55b-22e54a47e138-kube-api-access-z4pn7" (OuterVolumeSpecName: "kube-api-access-z4pn7") pod "9738eef4-843f-4685-b55b-22e54a47e138" (UID: "9738eef4-843f-4685-b55b-22e54a47e138"). InnerVolumeSpecName "kube-api-access-z4pn7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:46:38 crc kubenswrapper[5016]: I1211 11:46:38.568748 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4pn7\" (UniqueName: \"kubernetes.io/projected/9738eef4-843f-4685-b55b-22e54a47e138-kube-api-access-z4pn7\") on node \"crc\" DevicePath \"\""
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.260404 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb06ca11f636da70b6c3e0980a46334da6fc2d506c6c2eb1cfef3b141de01fbb"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.260534 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/crc-debug-84jh5"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.488083 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9738eef4-843f-4685-b55b-22e54a47e138" path="/var/lib/kubelet/pods/9738eef4-843f-4685-b55b-22e54a47e138/volumes"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.658547 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zbtll/crc-debug-tpjfj"]
Dec 11 11:46:39 crc kubenswrapper[5016]: E1211 11:46:39.659132 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9738eef4-843f-4685-b55b-22e54a47e138" containerName="container-00"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.659151 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="9738eef4-843f-4685-b55b-22e54a47e138" containerName="container-00"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.659363 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="9738eef4-843f-4685-b55b-22e54a47e138" containerName="container-00"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.660228 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/crc-debug-tpjfj"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.662905 5016 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-zbtll"/"default-dockercfg-xffmk"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.797079 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50-host\") pod \"crc-debug-tpjfj\" (UID: \"f47675b5-ef4c-4a2d-a41f-c8de46d5cc50\") " pod="openshift-must-gather-zbtll/crc-debug-tpjfj"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.797163 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxwkw\" (UniqueName: \"kubernetes.io/projected/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50-kube-api-access-hxwkw\") pod \"crc-debug-tpjfj\" (UID: \"f47675b5-ef4c-4a2d-a41f-c8de46d5cc50\") " pod="openshift-must-gather-zbtll/crc-debug-tpjfj"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.900258 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50-host\") pod \"crc-debug-tpjfj\" (UID: \"f47675b5-ef4c-4a2d-a41f-c8de46d5cc50\") " pod="openshift-must-gather-zbtll/crc-debug-tpjfj"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.900388 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxwkw\" (UniqueName: \"kubernetes.io/projected/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50-kube-api-access-hxwkw\") pod \"crc-debug-tpjfj\" (UID: \"f47675b5-ef4c-4a2d-a41f-c8de46d5cc50\") " pod="openshift-must-gather-zbtll/crc-debug-tpjfj"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.900446 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50-host\") pod \"crc-debug-tpjfj\" (UID: \"f47675b5-ef4c-4a2d-a41f-c8de46d5cc50\") " pod="openshift-must-gather-zbtll/crc-debug-tpjfj"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.923828 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxwkw\" (UniqueName: \"kubernetes.io/projected/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50-kube-api-access-hxwkw\") pod \"crc-debug-tpjfj\" (UID: \"f47675b5-ef4c-4a2d-a41f-c8de46d5cc50\") " pod="openshift-must-gather-zbtll/crc-debug-tpjfj"
Dec 11 11:46:39 crc kubenswrapper[5016]: I1211 11:46:39.979097 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/crc-debug-tpjfj"
Dec 11 11:46:40 crc kubenswrapper[5016]: I1211 11:46:40.273133 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zbtll/crc-debug-tpjfj" event={"ID":"f47675b5-ef4c-4a2d-a41f-c8de46d5cc50","Type":"ContainerStarted","Data":"0739c7566145ef63d5ca8142be2daab0c5b3c2ff513361c5fe153adbe7d8cc31"}
Dec 11 11:46:41 crc kubenswrapper[5016]: I1211 11:46:41.293119 5016 generic.go:334] "Generic (PLEG): container finished" podID="f47675b5-ef4c-4a2d-a41f-c8de46d5cc50" containerID="7c80a5dbf7187132bf97bb747f24c71473106c1bcb4f6d344c8759592f262daa" exitCode=0
Dec 11 11:46:41 crc kubenswrapper[5016]: I1211 11:46:41.293336 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zbtll/crc-debug-tpjfj" event={"ID":"f47675b5-ef4c-4a2d-a41f-c8de46d5cc50","Type":"ContainerDied","Data":"7c80a5dbf7187132bf97bb747f24c71473106c1bcb4f6d344c8759592f262daa"}
Dec 11 11:46:41 crc kubenswrapper[5016]: I1211 11:46:41.749871 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zbtll/crc-debug-tpjfj"]
Dec 11 11:46:41 crc kubenswrapper[5016]: I1211 11:46:41.765390 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zbtll/crc-debug-tpjfj"]
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.409281 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/crc-debug-tpjfj"
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.563653 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxwkw\" (UniqueName: \"kubernetes.io/projected/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50-kube-api-access-hxwkw\") pod \"f47675b5-ef4c-4a2d-a41f-c8de46d5cc50\" (UID: \"f47675b5-ef4c-4a2d-a41f-c8de46d5cc50\") "
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.564193 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50-host\") pod \"f47675b5-ef4c-4a2d-a41f-c8de46d5cc50\" (UID: \"f47675b5-ef4c-4a2d-a41f-c8de46d5cc50\") "
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.564375 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50-host" (OuterVolumeSpecName: "host") pod "f47675b5-ef4c-4a2d-a41f-c8de46d5cc50" (UID: "f47675b5-ef4c-4a2d-a41f-c8de46d5cc50"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.564908 5016 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50-host\") on node \"crc\" DevicePath \"\""
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.575363 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50-kube-api-access-hxwkw" (OuterVolumeSpecName: "kube-api-access-hxwkw") pod "f47675b5-ef4c-4a2d-a41f-c8de46d5cc50" (UID: "f47675b5-ef4c-4a2d-a41f-c8de46d5cc50"). InnerVolumeSpecName "kube-api-access-hxwkw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.668156 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxwkw\" (UniqueName: \"kubernetes.io/projected/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50-kube-api-access-hxwkw\") on node \"crc\" DevicePath \"\""
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.933315 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.933843 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.958322 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zbtll/crc-debug-5zqjm"]
Dec 11 11:46:42 crc kubenswrapper[5016]: E1211 11:46:42.958822 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f47675b5-ef4c-4a2d-a41f-c8de46d5cc50" containerName="container-00"
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.958842 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="f47675b5-ef4c-4a2d-a41f-c8de46d5cc50" containerName="container-00"
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.959140 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="f47675b5-ef4c-4a2d-a41f-c8de46d5cc50" containerName="container-00"
Dec 11 11:46:42 crc kubenswrapper[5016]: I1211 11:46:42.959898 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/crc-debug-5zqjm"
Dec 11 11:46:43 crc kubenswrapper[5016]: I1211 11:46:43.076872 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/620b7155-17cf-4017-83a6-57befbb3e08b-host\") pod \"crc-debug-5zqjm\" (UID: \"620b7155-17cf-4017-83a6-57befbb3e08b\") " pod="openshift-must-gather-zbtll/crc-debug-5zqjm"
Dec 11 11:46:43 crc kubenswrapper[5016]: I1211 11:46:43.077245 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrsvz\" (UniqueName: \"kubernetes.io/projected/620b7155-17cf-4017-83a6-57befbb3e08b-kube-api-access-rrsvz\") pod \"crc-debug-5zqjm\" (UID: \"620b7155-17cf-4017-83a6-57befbb3e08b\") " pod="openshift-must-gather-zbtll/crc-debug-5zqjm"
Dec 11 11:46:43 crc kubenswrapper[5016]: I1211 11:46:43.179459 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrsvz\" (UniqueName: \"kubernetes.io/projected/620b7155-17cf-4017-83a6-57befbb3e08b-kube-api-access-rrsvz\") pod \"crc-debug-5zqjm\" (UID: \"620b7155-17cf-4017-83a6-57befbb3e08b\") " pod="openshift-must-gather-zbtll/crc-debug-5zqjm"
Dec 11 11:46:43 crc kubenswrapper[5016]: I1211 11:46:43.179546 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/620b7155-17cf-4017-83a6-57befbb3e08b-host\") pod \"crc-debug-5zqjm\" (UID: \"620b7155-17cf-4017-83a6-57befbb3e08b\") " pod="openshift-must-gather-zbtll/crc-debug-5zqjm"
Dec 11 11:46:43 crc kubenswrapper[5016]: I1211 11:46:43.179701 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/620b7155-17cf-4017-83a6-57befbb3e08b-host\") pod \"crc-debug-5zqjm\" (UID: \"620b7155-17cf-4017-83a6-57befbb3e08b\") " pod="openshift-must-gather-zbtll/crc-debug-5zqjm"
Dec 11 11:46:43 crc kubenswrapper[5016]: I1211 11:46:43.313785 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0739c7566145ef63d5ca8142be2daab0c5b3c2ff513361c5fe153adbe7d8cc31"
Dec 11 11:46:43 crc kubenswrapper[5016]: I1211 11:46:43.313876 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/crc-debug-tpjfj"
Dec 11 11:46:43 crc kubenswrapper[5016]: I1211 11:46:43.457611 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrsvz\" (UniqueName: \"kubernetes.io/projected/620b7155-17cf-4017-83a6-57befbb3e08b-kube-api-access-rrsvz\") pod \"crc-debug-5zqjm\" (UID: \"620b7155-17cf-4017-83a6-57befbb3e08b\") " pod="openshift-must-gather-zbtll/crc-debug-5zqjm"
Dec 11 11:46:43 crc kubenswrapper[5016]: I1211 11:46:43.494142 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f47675b5-ef4c-4a2d-a41f-c8de46d5cc50" path="/var/lib/kubelet/pods/f47675b5-ef4c-4a2d-a41f-c8de46d5cc50/volumes"
Dec 11 11:46:43 crc kubenswrapper[5016]: I1211 11:46:43.587109 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/crc-debug-5zqjm"
Dec 11 11:46:44 crc kubenswrapper[5016]: I1211 11:46:44.332669 5016 generic.go:334] "Generic (PLEG): container finished" podID="620b7155-17cf-4017-83a6-57befbb3e08b" containerID="9d5a55cb1e8f519bfc5b176f8ce51306fde243edb8c5f6a6dc6ac1f2377b889e" exitCode=0
Dec 11 11:46:44 crc kubenswrapper[5016]: I1211 11:46:44.333482 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zbtll/crc-debug-5zqjm" event={"ID":"620b7155-17cf-4017-83a6-57befbb3e08b","Type":"ContainerDied","Data":"9d5a55cb1e8f519bfc5b176f8ce51306fde243edb8c5f6a6dc6ac1f2377b889e"}
Dec 11 11:46:44 crc kubenswrapper[5016]: I1211 11:46:44.333563 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zbtll/crc-debug-5zqjm" event={"ID":"620b7155-17cf-4017-83a6-57befbb3e08b","Type":"ContainerStarted","Data":"446a20626846f6a05a749ba486d65659e9301100e0771e51362c220befc17cda"}
Dec 11 11:46:44 crc kubenswrapper[5016]: I1211 11:46:44.383050 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zbtll/crc-debug-5zqjm"]
Dec 11 11:46:44 crc kubenswrapper[5016]: I1211 11:46:44.393360 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zbtll/crc-debug-5zqjm"]
Dec 11 11:46:45 crc kubenswrapper[5016]: I1211 11:46:45.438925 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/crc-debug-5zqjm"
Dec 11 11:46:45 crc kubenswrapper[5016]: I1211 11:46:45.533035 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrsvz\" (UniqueName: \"kubernetes.io/projected/620b7155-17cf-4017-83a6-57befbb3e08b-kube-api-access-rrsvz\") pod \"620b7155-17cf-4017-83a6-57befbb3e08b\" (UID: \"620b7155-17cf-4017-83a6-57befbb3e08b\") "
Dec 11 11:46:45 crc kubenswrapper[5016]: I1211 11:46:45.534708 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/620b7155-17cf-4017-83a6-57befbb3e08b-host\") pod \"620b7155-17cf-4017-83a6-57befbb3e08b\" (UID: \"620b7155-17cf-4017-83a6-57befbb3e08b\") "
Dec 11 11:46:45 crc kubenswrapper[5016]: I1211 11:46:45.534874 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/620b7155-17cf-4017-83a6-57befbb3e08b-host" (OuterVolumeSpecName: "host") pod "620b7155-17cf-4017-83a6-57befbb3e08b" (UID: "620b7155-17cf-4017-83a6-57befbb3e08b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 11 11:46:45 crc kubenswrapper[5016]: I1211 11:46:45.535586 5016 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/620b7155-17cf-4017-83a6-57befbb3e08b-host\") on node \"crc\" DevicePath \"\""
Dec 11 11:46:45 crc kubenswrapper[5016]: I1211 11:46:45.540522 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/620b7155-17cf-4017-83a6-57befbb3e08b-kube-api-access-rrsvz" (OuterVolumeSpecName: "kube-api-access-rrsvz") pod "620b7155-17cf-4017-83a6-57befbb3e08b" (UID: "620b7155-17cf-4017-83a6-57befbb3e08b"). InnerVolumeSpecName "kube-api-access-rrsvz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 11 11:46:45 crc kubenswrapper[5016]: I1211 11:46:45.637638 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrsvz\" (UniqueName: \"kubernetes.io/projected/620b7155-17cf-4017-83a6-57befbb3e08b-kube-api-access-rrsvz\") on node \"crc\" DevicePath \"\""
Dec 11 11:46:46 crc kubenswrapper[5016]: I1211 11:46:46.370224 5016 scope.go:117] "RemoveContainer" containerID="9d5a55cb1e8f519bfc5b176f8ce51306fde243edb8c5f6a6dc6ac1f2377b889e"
Dec 11 11:46:46 crc kubenswrapper[5016]: I1211 11:46:46.370792 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/crc-debug-5zqjm"
Dec 11 11:46:47 crc kubenswrapper[5016]: I1211 11:46:47.486382 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="620b7155-17cf-4017-83a6-57befbb3e08b" path="/var/lib/kubelet/pods/620b7155-17cf-4017-83a6-57befbb3e08b/volumes"
Dec 11 11:47:11 crc kubenswrapper[5016]: I1211 11:47:11.948411 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-75d7945896-vvw5x_42eab2b8-1142-4d4f-bb8a-58736349fd7e/barbican-api/0.log"
Dec 11 11:47:12 crc kubenswrapper[5016]: I1211 11:47:12.148407 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-75d7945896-vvw5x_42eab2b8-1142-4d4f-bb8a-58736349fd7e/barbican-api-log/0.log"
Dec 11 11:47:12 crc kubenswrapper[5016]: I1211 11:47:12.148789 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-559df4c4fd-rpdct_2ad5059d-bfd5-4ea8-8d6a-898cd592e49d/barbican-keystone-listener/0.log"
Dec 11 11:47:12 crc kubenswrapper[5016]: I1211 11:47:12.232086 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-559df4c4fd-rpdct_2ad5059d-bfd5-4ea8-8d6a-898cd592e49d/barbican-keystone-listener-log/0.log"
Dec 11 11:47:12 crc kubenswrapper[5016]: I1211 11:47:12.383015 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5f9f9b6559-f78rz_266c2ca6-fea6-4f3d-8796-bd0db83f2bf0/barbican-worker-log/0.log"
Dec 11 11:47:12 crc kubenswrapper[5016]: I1211 11:47:12.390089 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5f9f9b6559-f78rz_266c2ca6-fea6-4f3d-8796-bd0db83f2bf0/barbican-worker/0.log"
Dec 11 11:47:12 crc kubenswrapper[5016]: I1211 11:47:12.638528 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-twq5b_10792fe7-d5d5-4918-8658-20331647f302/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 11 11:47:12 crc kubenswrapper[5016]: I1211 11:47:12.712070 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_fd3155c0-9091-4e5e-888d-67b0256b0b51/ceilometer-notification-agent/0.log"
Dec 11 11:47:12 crc kubenswrapper[5016]: I1211 11:47:12.752582 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_fd3155c0-9091-4e5e-888d-67b0256b0b51/ceilometer-central-agent/0.log"
Dec 11 11:47:12 crc kubenswrapper[5016]: I1211 11:47:12.922623 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_fd3155c0-9091-4e5e-888d-67b0256b0b51/sg-core/0.log"
Dec 11 11:47:12 crc kubenswrapper[5016]: I1211 11:47:12.932561 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 11 11:47:12 crc kubenswrapper[5016]: I1211 11:47:12.932634 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 11 11:47:12 crc kubenswrapper[5016]: I1211 11:47:12.960608 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_fd3155c0-9091-4e5e-888d-67b0256b0b51/proxy-httpd/0.log"
Dec 11 11:47:13 crc kubenswrapper[5016]: I1211 11:47:13.076471 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_40789e09-e7ca-4ce3-8939-9ab2605e257f/cinder-api/0.log"
Dec 11 11:47:13 crc kubenswrapper[5016]: I1211 11:47:13.148122 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_40789e09-e7ca-4ce3-8939-9ab2605e257f/cinder-api-log/0.log"
Dec 11 11:47:13 crc kubenswrapper[5016]: I1211 11:47:13.372036 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_0f265b9d-c475-455f-9fe7-05070efd4ec1/probe/0.log"
Dec 11 11:47:13 crc kubenswrapper[5016]: I1211 11:47:13.386627 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_0f265b9d-c475-455f-9fe7-05070efd4ec1/cinder-scheduler/0.log"
Dec 11 11:47:13 crc kubenswrapper[5016]: I1211 11:47:13.517553 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-vkpwh_6b68b5b9-fe7e-4340-8541-71c6f8b80f3f/configure-network-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 11 11:47:13 crc kubenswrapper[5016]: I1211 11:47:13.599869 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-rckrh_baa674c6-426d-428e-af4a-dbff72b93714/configure-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 11 11:47:14 crc kubenswrapper[5016]: I1211 11:47:14.002418 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-8nsnc_18f4fb70-2aaa-471a-9556-b0977ad6ec55/init/0.log"
Dec 11 11:47:14 crc kubenswrapper[5016]: I1211 11:47:14.177840 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-8nsnc_18f4fb70-2aaa-471a-9556-b0977ad6ec55/init/0.log"
Dec 11 11:47:14 crc kubenswrapper[5016]: I1211 11:47:14.235757 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-8nsnc_18f4fb70-2aaa-471a-9556-b0977ad6ec55/dnsmasq-dns/0.log"
Dec 11 11:47:14 crc kubenswrapper[5016]: I1211 11:47:14.264332 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-gcwqv_43a2a77d-f6c4-40ba-8258-ee6bced589f2/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 11 11:47:14 crc kubenswrapper[5016]: I1211 11:47:14.429186 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_67cb2370-3bd3-4105-9369-3b99535ed13f/glance-httpd/0.log"
Dec 11 11:47:14 crc kubenswrapper[5016]: I1211 11:47:14.477643 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_67cb2370-3bd3-4105-9369-3b99535ed13f/glance-log/0.log"
Dec 11 11:47:14 crc
kubenswrapper[5016]: I1211 11:47:14.623107 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_3243e41d-6485-4353-993a-11f309322b5f/glance-httpd/0.log" Dec 11 11:47:14 crc kubenswrapper[5016]: I1211 11:47:14.640252 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_3243e41d-6485-4353-993a-11f309322b5f/glance-log/0.log" Dec 11 11:47:14 crc kubenswrapper[5016]: I1211 11:47:14.800410 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7df5fc4844-wdnrz_02741cc6-3a2a-48c1-b492-57762e0d75e6/horizon/0.log" Dec 11 11:47:15 crc kubenswrapper[5016]: I1211 11:47:15.107981 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-vl6sg_a31cb907-f20d-44a6-abc0-53951fe5e793/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:47:15 crc kubenswrapper[5016]: I1211 11:47:15.262567 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7df5fc4844-wdnrz_02741cc6-3a2a-48c1-b492-57762e0d75e6/horizon-log/0.log" Dec 11 11:47:15 crc kubenswrapper[5016]: I1211 11:47:15.266681 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-8rmhc_b4760482-fee8-4399-bae9-a30831f41536/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:47:15 crc kubenswrapper[5016]: I1211 11:47:15.506265 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-589444b9f8-c7wwh_207fc475-2260-4b2f-86a9-c4c0bedf3ce1/keystone-api/0.log" Dec 11 11:47:15 crc kubenswrapper[5016]: I1211 11:47:15.523488 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29424181-nmfs5_59c5a0b3-43ac-48bc-a81f-b33b8b6ae9d2/keystone-cron/0.log" Dec 11 11:47:15 crc kubenswrapper[5016]: I1211 11:47:15.680284 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_1f3e95ec-e5d3-44ab-ae44-1279b0a04e75/kube-state-metrics/0.log" Dec 11 11:47:15 crc kubenswrapper[5016]: I1211 11:47:15.804814 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-8nvvd_ff52a65c-c0b6-4d71-8038-b8c079cd1d64/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:47:16 crc kubenswrapper[5016]: I1211 11:47:16.193314 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5d4bc555dc-hjmj8_a420329b-5657-402b-8b2c-c6f53beda0d6/neutron-api/0.log" Dec 11 11:47:16 crc kubenswrapper[5016]: I1211 11:47:16.208448 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5d4bc555dc-hjmj8_a420329b-5657-402b-8b2c-c6f53beda0d6/neutron-httpd/0.log" Dec 11 11:47:16 crc kubenswrapper[5016]: I1211 11:47:16.234359 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-v9dg2_24b4bd76-ba99-43ad-91e9-4fdf518a6935/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:47:16 crc kubenswrapper[5016]: I1211 11:47:16.934128 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_e23cc12c-b028-49ec-ba40-adb9ad2baf59/nova-api-log/0.log" Dec 11 11:47:17 crc kubenswrapper[5016]: I1211 11:47:17.048704 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_dfdff31d-6c59-4f13-ba0c-e5791bd7fedd/nova-cell0-conductor-conductor/0.log" Dec 11 11:47:17 
crc kubenswrapper[5016]: I1211 11:47:17.241407 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_9b21ed74-421c-4bbc-b17e-317beee96ae7/nova-cell1-conductor-conductor/0.log" Dec 11 11:47:17 crc kubenswrapper[5016]: I1211 11:47:17.477504 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_c12cf216-8a0e-4b03-bd6b-b5ced3f69c3d/nova-cell1-novncproxy-novncproxy/0.log" Dec 11 11:47:17 crc kubenswrapper[5016]: I1211 11:47:17.513285 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_e23cc12c-b028-49ec-ba40-adb9ad2baf59/nova-api-api/0.log" Dec 11 11:47:17 crc kubenswrapper[5016]: I1211 11:47:17.533332 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-kwsxl_2868a6eb-b9cf-40e7-aabf-4dd1899bbaf6/nova-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:47:17 crc kubenswrapper[5016]: I1211 11:47:17.741397 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_b74b056a-931e-4c8f-809d-025693ae2e9c/nova-metadata-log/0.log" Dec 11 11:47:18 crc kubenswrapper[5016]: I1211 11:47:18.463195 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_be590587-03d9-4391-98b3-bacb7432ec51/mysql-bootstrap/0.log" Dec 11 11:47:18 crc kubenswrapper[5016]: I1211 11:47:18.623297 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_be590587-03d9-4391-98b3-bacb7432ec51/mysql-bootstrap/0.log" Dec 11 11:47:18 crc kubenswrapper[5016]: I1211 11:47:18.623357 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_cd7590f4-6fdc-450e-8a96-e4ca6315d644/nova-scheduler-scheduler/0.log" Dec 11 11:47:18 crc kubenswrapper[5016]: I1211 11:47:18.728547 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_be590587-03d9-4391-98b3-bacb7432ec51/galera/0.log" Dec 11 11:47:18 crc kubenswrapper[5016]: I1211 11:47:18.851734 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd/mysql-bootstrap/0.log" Dec 11 11:47:19 crc kubenswrapper[5016]: I1211 11:47:19.068747 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd/mysql-bootstrap/0.log" Dec 11 11:47:19 crc kubenswrapper[5016]: I1211 11:47:19.131832 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ab6ac17c-dd30-41ed-a5c8-4c8ce7f349bd/galera/0.log" Dec 11 11:47:19 crc kubenswrapper[5016]: I1211 11:47:19.305355 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_5ab24d20-cfe0-4aeb-a0df-e0d0b245e863/openstackclient/0.log" Dec 11 11:47:19 crc kubenswrapper[5016]: I1211 11:47:19.447545 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_b74b056a-931e-4c8f-809d-025693ae2e9c/nova-metadata-metadata/0.log" Dec 11 11:47:19 crc kubenswrapper[5016]: I1211 11:47:19.483771 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-g76kk_cf0694a8-c7ff-429f-a52f-5885a8dcb3ac/ovn-controller/0.log" Dec 11 11:47:19 crc kubenswrapper[5016]: I1211 11:47:19.611527 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-rg5mz_d49703c4-2744-4669-baae-fc1ee5932f5d/openstack-network-exporter/0.log" 
Dec 11 11:47:20 crc kubenswrapper[5016]: I1211 11:47:20.166508 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vbtwd_4f718adb-56cf-4983-bd6a-e750e06edad7/ovsdb-server-init/0.log" Dec 11 11:47:20 crc kubenswrapper[5016]: I1211 11:47:20.373852 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vbtwd_4f718adb-56cf-4983-bd6a-e750e06edad7/ovs-vswitchd/0.log" Dec 11 11:47:20 crc kubenswrapper[5016]: I1211 11:47:20.440189 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vbtwd_4f718adb-56cf-4983-bd6a-e750e06edad7/ovsdb-server/0.log" Dec 11 11:47:20 crc kubenswrapper[5016]: I1211 11:47:20.459306 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vbtwd_4f718adb-56cf-4983-bd6a-e750e06edad7/ovsdb-server-init/0.log" Dec 11 11:47:20 crc kubenswrapper[5016]: I1211 11:47:20.695258 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-b97tz_68a2fe3a-3815-4605-b685-2ffe583f46d4/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:47:20 crc kubenswrapper[5016]: I1211 11:47:20.752533 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_817d89c2-920a-49a9-b87d-308f48847b2f/openstack-network-exporter/0.log" Dec 11 11:47:20 crc kubenswrapper[5016]: I1211 11:47:20.783339 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_817d89c2-920a-49a9-b87d-308f48847b2f/ovn-northd/0.log" Dec 11 11:47:20 crc kubenswrapper[5016]: I1211 11:47:20.947213 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_65ac3f0e-4016-4586-b742-2c52252ed51b/ovsdbserver-nb/0.log" Dec 11 11:47:21 crc kubenswrapper[5016]: I1211 11:47:21.027545 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_65ac3f0e-4016-4586-b742-2c52252ed51b/openstack-network-exporter/0.log" Dec 11 11:47:21 crc kubenswrapper[5016]: I1211 11:47:21.176043 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_de7b514e-0bc7-4260-9bc4-9c0f1b13562b/openstack-network-exporter/0.log" Dec 11 11:47:21 crc kubenswrapper[5016]: I1211 11:47:21.270446 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_de7b514e-0bc7-4260-9bc4-9c0f1b13562b/ovsdbserver-sb/0.log" Dec 11 11:47:21 crc kubenswrapper[5016]: I1211 11:47:21.486305 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-676fd6784-tg4g7_668906e8-a695-43ee-aca4-5b1bd13053eb/placement-api/0.log" Dec 11 11:47:21 crc kubenswrapper[5016]: I1211 11:47:21.549222 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-676fd6784-tg4g7_668906e8-a695-43ee-aca4-5b1bd13053eb/placement-log/0.log" Dec 11 11:47:21 crc kubenswrapper[5016]: I1211 11:47:21.589343 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_24d5919d-ee3d-4023-9a6b-bc1d9838b2ce/setup-container/0.log" Dec 11 11:47:21 crc kubenswrapper[5016]: I1211 11:47:21.800588 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_24d5919d-ee3d-4023-9a6b-bc1d9838b2ce/setup-container/0.log" Dec 11 11:47:21 crc kubenswrapper[5016]: I1211 11:47:21.874811 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_24d5919d-ee3d-4023-9a6b-bc1d9838b2ce/rabbitmq/0.log" 
Dec 11 11:47:21 crc kubenswrapper[5016]: I1211 11:47:21.946706 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_86d18250-4387-46f7-af2c-2ce21bf43e12/setup-container/0.log" Dec 11 11:47:22 crc kubenswrapper[5016]: I1211 11:47:22.101660 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_86d18250-4387-46f7-af2c-2ce21bf43e12/rabbitmq/0.log" Dec 11 11:47:22 crc kubenswrapper[5016]: I1211 11:47:22.160496 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_86d18250-4387-46f7-af2c-2ce21bf43e12/setup-container/0.log" Dec 11 11:47:22 crc kubenswrapper[5016]: I1211 11:47:22.255962 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-pf6r8_1965e849-9439-404f-96f1-d5ced3154038/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:47:22 crc kubenswrapper[5016]: I1211 11:47:22.410845 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-26jgb_e240bd3c-2bc0-4e00-b092-51ab30da277d/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:47:22 crc kubenswrapper[5016]: I1211 11:47:22.583169 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-bdsdl_b8f46431-27eb-4bb3-952a-3dd405e15121/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:47:22 crc kubenswrapper[5016]: I1211 11:47:22.831886 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-94jbb_f43b972e-9584-45ea-a540-cc2facfb7ec5/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:47:22 crc kubenswrapper[5016]: I1211 11:47:22.871621 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-lz69w_79db9d17-e0eb-40f5-88ca-5f222544e2b1/ssh-known-hosts-edpm-deployment/0.log" Dec 11 11:47:23 crc kubenswrapper[5016]: I1211 11:47:23.318022 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-58f98f7fd9-rtbw4_2816d686-f2da-4306-9b07-b27dc9eb88f5/proxy-httpd/0.log" Dec 11 11:47:23 crc kubenswrapper[5016]: I1211 11:47:23.346873 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-58f98f7fd9-rtbw4_2816d686-f2da-4306-9b07-b27dc9eb88f5/proxy-server/0.log" Dec 11 11:47:23 crc kubenswrapper[5016]: I1211 11:47:23.530651 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-cfbpr_8d68a71e-cbcb-4ce9-bb01-3b48154074a4/swift-ring-rebalance/0.log" Dec 11 11:47:23 crc kubenswrapper[5016]: I1211 11:47:23.580037 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/account-auditor/0.log" Dec 11 11:47:23 crc kubenswrapper[5016]: I1211 11:47:23.616321 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/account-reaper/0.log" Dec 11 11:47:23 crc kubenswrapper[5016]: I1211 11:47:23.806419 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/account-replicator/0.log" Dec 11 11:47:23 crc kubenswrapper[5016]: I1211 11:47:23.806508 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/account-server/0.log" Dec 11 11:47:23 crc 
kubenswrapper[5016]: I1211 11:47:23.902673 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/container-auditor/0.log" Dec 11 11:47:23 crc kubenswrapper[5016]: I1211 11:47:23.981146 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/container-replicator/0.log" Dec 11 11:47:24 crc kubenswrapper[5016]: I1211 11:47:24.022507 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/container-server/0.log" Dec 11 11:47:24 crc kubenswrapper[5016]: I1211 11:47:24.127169 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/container-updater/0.log" Dec 11 11:47:24 crc kubenswrapper[5016]: I1211 11:47:24.172202 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/object-auditor/0.log" Dec 11 11:47:24 crc kubenswrapper[5016]: I1211 11:47:24.258714 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/object-expirer/0.log" Dec 11 11:47:24 crc kubenswrapper[5016]: I1211 11:47:24.266966 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/object-replicator/0.log" Dec 11 11:47:24 crc kubenswrapper[5016]: I1211 11:47:24.363621 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/object-updater/0.log" Dec 11 11:47:24 crc kubenswrapper[5016]: I1211 11:47:24.410486 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/object-server/0.log" Dec 11 11:47:24 crc kubenswrapper[5016]: I1211 11:47:24.479705 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/rsync/0.log" Dec 11 11:47:24 crc kubenswrapper[5016]: I1211 11:47:24.579673 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_a24f6c06-a757-4b4b-9361-e87f07af2ca8/swift-recon-cron/0.log" Dec 11 11:47:24 crc kubenswrapper[5016]: I1211 11:47:24.757473 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-9s8mr_604b9ba2-ab41-4901-a9ef-9eb82bee5e4a/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:47:24 crc kubenswrapper[5016]: I1211 11:47:24.857234 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_d9613e90-5366-4f68-80dd-f66a7541a670/tempest-tests-tempest-tests-runner/0.log" Dec 11 11:47:25 crc kubenswrapper[5016]: I1211 11:47:25.035694 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_51a74a7d-9731-443f-b85f-99645084064a/test-operator-logs-container/0.log" Dec 11 11:47:25 crc kubenswrapper[5016]: I1211 11:47:25.090277 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-7s6ql_53387d31-b49f-4100-9772-a4f7d6898471/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Dec 11 11:47:35 crc kubenswrapper[5016]: I1211 11:47:35.968936 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_memcached-0_f0588b90-e0f3-49e1-9ff9-76e8aac23b93/memcached/0.log" Dec 11 11:47:42 crc kubenswrapper[5016]: I1211 11:47:42.933254 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:47:42 crc kubenswrapper[5016]: I1211 11:47:42.934032 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:47:42 crc kubenswrapper[5016]: I1211 11:47:42.934084 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 11:47:42 crc kubenswrapper[5016]: I1211 11:47:42.935187 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1ef2b4beefe05b953228fbd85b170d6d5a71b88779b80bfcc5c1cc7e2f4425ef"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 11:47:42 crc kubenswrapper[5016]: I1211 11:47:42.935328 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://1ef2b4beefe05b953228fbd85b170d6d5a71b88779b80bfcc5c1cc7e2f4425ef" gracePeriod=600 Dec 11 11:47:43 crc kubenswrapper[5016]: I1211 11:47:43.946435 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="1ef2b4beefe05b953228fbd85b170d6d5a71b88779b80bfcc5c1cc7e2f4425ef" exitCode=0 Dec 11 11:47:43 crc kubenswrapper[5016]: I1211 11:47:43.947217 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"1ef2b4beefe05b953228fbd85b170d6d5a71b88779b80bfcc5c1cc7e2f4425ef"} Dec 11 11:47:43 crc kubenswrapper[5016]: I1211 11:47:43.947252 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerStarted","Data":"6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8"} Dec 11 11:47:43 crc kubenswrapper[5016]: I1211 11:47:43.947271 5016 scope.go:117] "RemoveContainer" containerID="4a5ee40730cda8e5e8cae3e28bef399ae503392001ae1df83b1775687ed1ec03" Dec 11 11:47:53 crc kubenswrapper[5016]: I1211 11:47:53.666871 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/util/0.log" Dec 11 11:47:54 crc kubenswrapper[5016]: I1211 11:47:54.195465 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/util/0.log" Dec 11 11:47:54 crc kubenswrapper[5016]: I1211 
11:47:54.586615 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/pull/0.log" Dec 11 11:47:54 crc kubenswrapper[5016]: I1211 11:47:54.770309 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/extract/0.log" Dec 11 11:47:54 crc kubenswrapper[5016]: I1211 11:47:54.775292 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/pull/0.log" Dec 11 11:47:54 crc kubenswrapper[5016]: I1211 11:47:54.821864 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/pull/0.log" Dec 11 11:47:54 crc kubenswrapper[5016]: I1211 11:47:54.846639 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2f50e418bb000dd75ba3f5d004826004037b091e9f7200dc24bfc77527wmmtl_34f1fbe7-6974-4320-8365-3b047d159e3a/util/0.log" Dec 11 11:47:54 crc kubenswrapper[5016]: I1211 11:47:54.972389 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-qtvrv_cc7c5322-f255-4c02-b684-d1bccf74eb1a/kube-rbac-proxy/0.log" Dec 11 11:47:55 crc kubenswrapper[5016]: I1211 11:47:55.099448 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-qtvrv_cc7c5322-f255-4c02-b684-d1bccf74eb1a/manager/0.log" Dec 11 11:47:55 crc kubenswrapper[5016]: I1211 11:47:55.207489 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-rzjg8_de234c3f-f96c-444d-a7f5-a453df14d2e4/manager/0.log" Dec 11 11:47:55 crc kubenswrapper[5016]: I1211 11:47:55.235829 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-rzjg8_de234c3f-f96c-444d-a7f5-a453df14d2e4/kube-rbac-proxy/0.log" Dec 11 11:47:55 crc kubenswrapper[5016]: I1211 11:47:55.372256 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-4hwxf_251e6e53-bbba-4d67-a361-44c471db70ff/kube-rbac-proxy/0.log" Dec 11 11:47:55 crc kubenswrapper[5016]: I1211 11:47:55.412337 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-4hwxf_251e6e53-bbba-4d67-a361-44c471db70ff/manager/0.log" Dec 11 11:47:56 crc kubenswrapper[5016]: I1211 11:47:56.360331 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-v8hsp_b1812840-a032-4c7a-a851-505f89b19063/kube-rbac-proxy/0.log" Dec 11 11:47:56 crc kubenswrapper[5016]: I1211 11:47:56.375538 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-jpkdc_2f35a405-1590-4bd7-9f64-f897bac8e8e7/kube-rbac-proxy/0.log" Dec 11 11:47:56 crc kubenswrapper[5016]: I1211 11:47:56.467889 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-v8hsp_b1812840-a032-4c7a-a851-505f89b19063/manager/0.log" Dec 11 11:47:56 crc kubenswrapper[5016]: I1211 11:47:56.580512 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-jpkdc_2f35a405-1590-4bd7-9f64-f897bac8e8e7/manager/0.log" Dec 11 11:47:56 crc kubenswrapper[5016]: I1211 11:47:56.669877 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-tcntz_daa29314-dcea-4026-9a51-7f9ceaed9052/kube-rbac-proxy/0.log" Dec 11 11:47:56 crc kubenswrapper[5016]: I1211 11:47:56.701854 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-tcntz_daa29314-dcea-4026-9a51-7f9ceaed9052/manager/0.log" Dec 11 11:47:56 crc kubenswrapper[5016]: I1211 11:47:56.896625 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-9s5rq_b44a8ea9-ba71-486d-9672-44146f09acb1/kube-rbac-proxy/0.log" Dec 11 11:47:57 crc kubenswrapper[5016]: I1211 11:47:57.041651 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-48q65_ac95cdf1-ed70-4d47-8b28-3f7f5e68804b/kube-rbac-proxy/0.log" Dec 11 11:47:57 crc kubenswrapper[5016]: I1211 11:47:57.164388 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-9s5rq_b44a8ea9-ba71-486d-9672-44146f09acb1/manager/0.log" Dec 11 11:47:57 crc kubenswrapper[5016]: I1211 11:47:57.171880 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-48q65_ac95cdf1-ed70-4d47-8b28-3f7f5e68804b/manager/0.log" Dec 11 11:47:57 crc kubenswrapper[5016]: I1211 11:47:57.316825 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-lx6dp_b2cd783c-ef38-4478-9f86-60374f554bb2/kube-rbac-proxy/0.log" Dec 11 11:47:57 crc kubenswrapper[5016]: I1211 11:47:57.488804 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-lx6dp_b2cd783c-ef38-4478-9f86-60374f554bb2/manager/0.log" Dec 11 11:47:57 crc kubenswrapper[5016]: I1211 11:47:57.515314 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-46n5n_95b9a24e-2b04-4161-aee4-2b7a73330a4e/kube-rbac-proxy/0.log" Dec 11 11:47:57 crc kubenswrapper[5016]: I1211 11:47:57.577383 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-46n5n_95b9a24e-2b04-4161-aee4-2b7a73330a4e/manager/0.log" Dec 11 11:47:57 crc kubenswrapper[5016]: I1211 11:47:57.713213 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-dfr98_26861a3b-3eb1-4c65-8c69-2d43a2aab77c/kube-rbac-proxy/0.log" Dec 11 11:47:57 crc kubenswrapper[5016]: I1211 11:47:57.741281 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-dfr98_26861a3b-3eb1-4c65-8c69-2d43a2aab77c/manager/0.log" Dec 11 11:47:57 crc kubenswrapper[5016]: I1211 11:47:57.780707 5016 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-5qrxg_cdf76c07-0127-402e-90d7-9c868594b4d7/kube-rbac-proxy/0.log" Dec 11 11:47:58 crc kubenswrapper[5016]: I1211 11:47:58.116186 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-5qrxg_cdf76c07-0127-402e-90d7-9c868594b4d7/manager/0.log" Dec 11 11:47:58 crc kubenswrapper[5016]: I1211 11:47:58.156132 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-vbw9f_e4f0f2a5-a15b-45b8-96ea-91e37ea98237/kube-rbac-proxy/0.log" Dec 11 11:47:58 crc kubenswrapper[5016]: I1211 11:47:58.281191 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-vbw9f_e4f0f2a5-a15b-45b8-96ea-91e37ea98237/manager/0.log" Dec 11 11:47:58 crc kubenswrapper[5016]: I1211 11:47:58.335854 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-vcscv_4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19/kube-rbac-proxy/0.log" Dec 11 11:47:58 crc kubenswrapper[5016]: I1211 11:47:58.392579 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-vcscv_4e9d4c9a-15a9-4f78-bf6a-abeb230d2d19/manager/0.log" Dec 11 11:47:58 crc kubenswrapper[5016]: I1211 11:47:58.510897 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fbtnxw_dfea8003-afd2-45aa-bd7b-dcc5460e8a80/kube-rbac-proxy/0.log" Dec 11 11:47:58 crc kubenswrapper[5016]: I1211 11:47:58.514985 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fbtnxw_dfea8003-afd2-45aa-bd7b-dcc5460e8a80/manager/0.log" Dec 11 11:47:58 crc kubenswrapper[5016]: I1211 11:47:58.850814 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-75cgw_3c73a135-7e40-4ba1-a674-0259ba8677db/registry-server/0.log" Dec 11 11:47:59 crc kubenswrapper[5016]: I1211 11:47:59.018801 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-b7dd9c5f4-ktl4m_2c43efae-bdbc-4043-b4fc-6e04c5f95003/operator/0.log" Dec 11 11:47:59 crc kubenswrapper[5016]: I1211 11:47:59.041878 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-wf9q5_f07a07a1-b235-4d36-a666-2b1be3363f34/kube-rbac-proxy/0.log" Dec 11 11:47:59 crc kubenswrapper[5016]: I1211 11:47:59.146981 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-wf9q5_f07a07a1-b235-4d36-a666-2b1be3363f34/manager/0.log" Dec 11 11:47:59 crc kubenswrapper[5016]: I1211 11:47:59.289219 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-kwflq_1099bcae-fea4-4864-8434-98ed888307e5/kube-rbac-proxy/0.log" Dec 11 11:47:59 crc kubenswrapper[5016]: I1211 11:47:59.333019 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-kwflq_1099bcae-fea4-4864-8434-98ed888307e5/manager/0.log" Dec 11 11:47:59 crc kubenswrapper[5016]: I1211 11:47:59.515874 5016 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-rhkn6_e5d7cce6-369e-4837-8e40-385de0d684f7/operator/0.log" Dec 11 11:47:59 crc kubenswrapper[5016]: I1211 11:47:59.665311 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-5z7c4_1d57e8d7-5c81-4a4d-97a9-af4795392e5a/kube-rbac-proxy/0.log" Dec 11 11:47:59 crc kubenswrapper[5016]: I1211 11:47:59.760397 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-5z7c4_1d57e8d7-5c81-4a4d-97a9-af4795392e5a/manager/0.log" Dec 11 11:47:59 crc kubenswrapper[5016]: I1211 11:47:59.808213 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-595db99498-vmll2_5adcacd2-730a-4cb7-9944-239289405003/manager/0.log" Dec 11 11:47:59 crc kubenswrapper[5016]: I1211 11:47:59.887882 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-58d5ff84df-6jmdx_5e9876fa-8ec4-432b-b582-6ee210b828b5/kube-rbac-proxy/0.log" Dec 11 11:47:59 crc kubenswrapper[5016]: I1211 11:47:59.964016 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-58d5ff84df-6jmdx_5e9876fa-8ec4-432b-b582-6ee210b828b5/manager/0.log" Dec 11 11:47:59 crc kubenswrapper[5016]: I1211 11:47:59.986129 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-wphqn_542f9a19-fab3-426b-bb8a-e12a45e4e422/kube-rbac-proxy/0.log" Dec 11 11:48:00 crc kubenswrapper[5016]: I1211 11:48:00.012378 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-wphqn_542f9a19-fab3-426b-bb8a-e12a45e4e422/manager/0.log" Dec 11 11:48:00 crc kubenswrapper[5016]: I1211 11:48:00.173431 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-75944c9b7-92pjv_82d121a5-e733-445f-be6e-bc96e3c162e2/kube-rbac-proxy/0.log" Dec 11 11:48:00 crc kubenswrapper[5016]: I1211 11:48:00.194395 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-75944c9b7-92pjv_82d121a5-e733-445f-be6e-bc96e3c162e2/manager/0.log" Dec 11 11:48:14 crc kubenswrapper[5016]: I1211 11:48:14.013176 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-dflh8" podUID="ab16d65b-bdbe-4988-9c13-5e0b91c72217" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 11 11:48:19 crc kubenswrapper[5016]: I1211 11:48:19.873034 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-ljcrh_af75da0d-e4cb-4961-b57a-ea888c20af89/control-plane-machine-set-operator/2.log" Dec 11 11:48:19 crc kubenswrapper[5016]: I1211 11:48:19.959782 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-ljcrh_af75da0d-e4cb-4961-b57a-ea888c20af89/control-plane-machine-set-operator/1.log" Dec 11 11:48:20 crc kubenswrapper[5016]: I1211 11:48:20.133070 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-tn6f4_cb94a68f-794d-4e0f-9a65-aff1b885d021/kube-rbac-proxy/0.log" Dec 11 11:48:20 crc kubenswrapper[5016]: I1211 11:48:20.186829 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-tn6f4_cb94a68f-794d-4e0f-9a65-aff1b885d021/machine-api-operator/0.log" Dec 11 11:48:34 crc kubenswrapper[5016]: I1211 11:48:34.366105 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-sm8ss_f63479f5-0af6-4622-85fb-42bcfb115692/cert-manager-controller/0.log" Dec 11 11:48:34 crc kubenswrapper[5016]: I1211 11:48:34.565338 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-dlzf6_058f486a-6a97-4bc7-9e43-65af0e4b5634/cert-manager-cainjector/0.log" Dec 11 11:48:34 crc kubenswrapper[5016]: I1211 11:48:34.669416 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-8pgz5_e7d758da-34fb-4507-83f4-1e5f948d9249/cert-manager-webhook/0.log" Dec 11 11:48:44 crc kubenswrapper[5016]: I1211 11:48:44.931904 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bjnr8"] Dec 11 11:48:44 crc kubenswrapper[5016]: E1211 11:48:44.933240 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="620b7155-17cf-4017-83a6-57befbb3e08b" containerName="container-00" Dec 11 11:48:44 crc kubenswrapper[5016]: I1211 11:48:44.933263 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="620b7155-17cf-4017-83a6-57befbb3e08b" containerName="container-00" Dec 11 11:48:44 crc kubenswrapper[5016]: I1211 11:48:44.933536 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="620b7155-17cf-4017-83a6-57befbb3e08b" containerName="container-00" Dec 11 11:48:44 crc kubenswrapper[5016]: I1211 11:48:44.937070 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:44 crc kubenswrapper[5016]: I1211 11:48:44.943135 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bjnr8"] Dec 11 11:48:45 crc kubenswrapper[5016]: I1211 11:48:45.031294 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92382491-2c6f-40c2-b4a8-1b46fef50dcf-utilities\") pod \"redhat-operators-bjnr8\" (UID: \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\") " pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:45 crc kubenswrapper[5016]: I1211 11:48:45.031388 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckwkh\" (UniqueName: \"kubernetes.io/projected/92382491-2c6f-40c2-b4a8-1b46fef50dcf-kube-api-access-ckwkh\") pod \"redhat-operators-bjnr8\" (UID: \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\") " pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:45 crc kubenswrapper[5016]: I1211 11:48:45.031547 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92382491-2c6f-40c2-b4a8-1b46fef50dcf-catalog-content\") pod \"redhat-operators-bjnr8\" (UID: \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\") " pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:45 crc kubenswrapper[5016]: I1211 11:48:45.133341 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckwkh\" (UniqueName: \"kubernetes.io/projected/92382491-2c6f-40c2-b4a8-1b46fef50dcf-kube-api-access-ckwkh\") pod \"redhat-operators-bjnr8\" (UID: \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\") " pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:45 crc kubenswrapper[5016]: I1211 11:48:45.134169 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92382491-2c6f-40c2-b4a8-1b46fef50dcf-catalog-content\") pod \"redhat-operators-bjnr8\" (UID: \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\") " pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:45 crc kubenswrapper[5016]: I1211 11:48:45.134907 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92382491-2c6f-40c2-b4a8-1b46fef50dcf-utilities\") pod \"redhat-operators-bjnr8\" (UID: \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\") " pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:45 crc kubenswrapper[5016]: I1211 11:48:45.134714 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92382491-2c6f-40c2-b4a8-1b46fef50dcf-catalog-content\") pod \"redhat-operators-bjnr8\" (UID: \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\") " pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:45 crc kubenswrapper[5016]: I1211 11:48:45.135271 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92382491-2c6f-40c2-b4a8-1b46fef50dcf-utilities\") pod \"redhat-operators-bjnr8\" (UID: \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\") " pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:45 crc kubenswrapper[5016]: I1211 11:48:45.159224 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-ckwkh\" (UniqueName: \"kubernetes.io/projected/92382491-2c6f-40c2-b4a8-1b46fef50dcf-kube-api-access-ckwkh\") pod \"redhat-operators-bjnr8\" (UID: \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\") " pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:45 crc kubenswrapper[5016]: I1211 11:48:45.264083 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:45 crc kubenswrapper[5016]: I1211 11:48:45.758452 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bjnr8"] Dec 11 11:48:46 crc kubenswrapper[5016]: I1211 11:48:46.642851 5016 generic.go:334] "Generic (PLEG): container finished" podID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" containerID="03a038d644dba62294971f6d09c02a954123a99105e97fa334b98d5cc20c193a" exitCode=0 Dec 11 11:48:46 crc kubenswrapper[5016]: I1211 11:48:46.642971 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjnr8" event={"ID":"92382491-2c6f-40c2-b4a8-1b46fef50dcf","Type":"ContainerDied","Data":"03a038d644dba62294971f6d09c02a954123a99105e97fa334b98d5cc20c193a"} Dec 11 11:48:46 crc kubenswrapper[5016]: I1211 11:48:46.644740 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjnr8" event={"ID":"92382491-2c6f-40c2-b4a8-1b46fef50dcf","Type":"ContainerStarted","Data":"a75dfab8dd8c64c5c704a509ea6dc155cda91997f34719d9f5e4ac5bfbd1d5e9"} Dec 11 11:48:46 crc kubenswrapper[5016]: I1211 11:48:46.646370 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 11:48:48 crc kubenswrapper[5016]: I1211 11:48:48.283008 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6ff7998486-6wx52_35c9ce4d-504d-4813-b776-f5d07b9c3d1d/nmstate-console-plugin/0.log" Dec 11 11:48:48 crc kubenswrapper[5016]: I1211 11:48:48.365292 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-276mn_f541e158-4765-46f6-9a14-f6917fa4b1e3/nmstate-handler/0.log" Dec 11 11:48:48 crc kubenswrapper[5016]: I1211 11:48:48.519715 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-h52ww_32c0573d-b135-42ba-bec4-9092104e870c/nmstate-metrics/0.log" Dec 11 11:48:48 crc kubenswrapper[5016]: I1211 11:48:48.555844 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f7f7578db-h52ww_32c0573d-b135-42ba-bec4-9092104e870c/kube-rbac-proxy/0.log" Dec 11 11:48:48 crc kubenswrapper[5016]: I1211 11:48:48.677217 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjnr8" event={"ID":"92382491-2c6f-40c2-b4a8-1b46fef50dcf","Type":"ContainerStarted","Data":"1a91e41cb59b0c1801db987003aaac8ea505b195b592c118690674bbbb62706a"} Dec 11 11:48:48 crc kubenswrapper[5016]: I1211 11:48:48.925199 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-6769fb99d-bcg49_040df3df-7870-45d8-b15c-4f083db8385f/nmstate-operator/0.log" Dec 11 11:48:48 crc kubenswrapper[5016]: I1211 11:48:48.989042 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-f8fb84555-lz22l_671f4389-4dd6-45c7-8eda-d60191819517/nmstate-webhook/0.log" Dec 11 11:48:49 crc kubenswrapper[5016]: I1211 11:48:49.687539 5016 generic.go:334] "Generic (PLEG): container finished" 
podID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" containerID="1a91e41cb59b0c1801db987003aaac8ea505b195b592c118690674bbbb62706a" exitCode=0 Dec 11 11:48:49 crc kubenswrapper[5016]: I1211 11:48:49.687658 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjnr8" event={"ID":"92382491-2c6f-40c2-b4a8-1b46fef50dcf","Type":"ContainerDied","Data":"1a91e41cb59b0c1801db987003aaac8ea505b195b592c118690674bbbb62706a"} Dec 11 11:48:50 crc kubenswrapper[5016]: I1211 11:48:50.719313 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjnr8" event={"ID":"92382491-2c6f-40c2-b4a8-1b46fef50dcf","Type":"ContainerStarted","Data":"fa99ed64e49b2a9e4d7a5da6b1f91cd53f6acd182f8c0e43cce185a0bc3a654f"} Dec 11 11:48:50 crc kubenswrapper[5016]: I1211 11:48:50.744805 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bjnr8" podStartSLOduration=3.289943913 podStartE2EDuration="6.74478054s" podCreationTimestamp="2025-12-11 11:48:44 +0000 UTC" firstStartedPulling="2025-12-11 11:48:46.645880832 +0000 UTC m=+4443.464440411" lastFinishedPulling="2025-12-11 11:48:50.100717459 +0000 UTC m=+4446.919277038" observedRunningTime="2025-12-11 11:48:50.740135915 +0000 UTC m=+4447.558695494" watchObservedRunningTime="2025-12-11 11:48:50.74478054 +0000 UTC m=+4447.563340119" Dec 11 11:48:55 crc kubenswrapper[5016]: I1211 11:48:55.264519 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:55 crc kubenswrapper[5016]: I1211 11:48:55.266336 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:48:56 crc kubenswrapper[5016]: I1211 11:48:56.318766 5016 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bjnr8" podUID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" containerName="registry-server" probeResult="failure" output=< Dec 11 11:48:56 crc kubenswrapper[5016]: timeout: failed to connect service ":50051" within 1s Dec 11 11:48:56 crc kubenswrapper[5016]: > Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.124954 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-skxpb_c4c659bc-4572-4852-8008-231dc642bbd7/kube-rbac-proxy/0.log" Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.213798 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-5bddd4b946-skxpb_c4c659bc-4572-4852-8008-231dc642bbd7/controller/0.log" Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.318484 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.372686 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.384749 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-frr-files/0.log" Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.555293 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bjnr8"] Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.584494 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-frr-files/0.log" Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.591770 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-reloader/0.log" Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.616704 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-reloader/0.log" Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.628027 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-metrics/0.log" Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.849161 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-metrics/0.log" Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.856594 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-frr-files/0.log" Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.871229 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-reloader/0.log" Dec 11 11:49:05 crc kubenswrapper[5016]: I1211 11:49:05.874509 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-metrics/0.log" Dec 11 11:49:06 crc kubenswrapper[5016]: I1211 11:49:06.098197 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-frr-files/0.log" Dec 11 11:49:06 crc kubenswrapper[5016]: I1211 11:49:06.102485 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-reloader/0.log" Dec 11 11:49:06 crc kubenswrapper[5016]: I1211 11:49:06.105011 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/controller/0.log" Dec 11 11:49:06 crc kubenswrapper[5016]: I1211 11:49:06.122382 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/cp-metrics/0.log" Dec 11 11:49:06 crc kubenswrapper[5016]: I1211 11:49:06.272829 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/kube-rbac-proxy/0.log" Dec 11 11:49:06 crc kubenswrapper[5016]: I1211 11:49:06.301198 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/frr-metrics/0.log" Dec 11 11:49:06 crc kubenswrapper[5016]: I1211 11:49:06.360663 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/kube-rbac-proxy-frr/0.log" Dec 11 11:49:06 crc kubenswrapper[5016]: I1211 11:49:06.576262 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/reloader/0.log" Dec 11 11:49:06 crc kubenswrapper[5016]: I1211 11:49:06.601775 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7784b6fcf-4c5v8_76b7a036-07e1-4a49-b5c4-39ed67ae34b6/frr-k8s-webhook-server/0.log" Dec 11 
11:49:06 crc kubenswrapper[5016]: I1211 11:49:06.836968 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6fcfbcfbcf-dfwbt_bdd623de-6c7c-46b2-a168-fabbbf16ce6c/manager/0.log" Dec 11 11:49:06 crc kubenswrapper[5016]: I1211 11:49:06.855049 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bjnr8" podUID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" containerName="registry-server" containerID="cri-o://fa99ed64e49b2a9e4d7a5da6b1f91cd53f6acd182f8c0e43cce185a0bc3a654f" gracePeriod=2 Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.049810 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7c896697b-mstrs_8b488ea8-aba6-430a-bb39-d1459ef2edea/webhook-server/0.log" Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.113836 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-tdtwb_c3691778-17ce-4c44-b8e1-f9f5a6727778/kube-rbac-proxy/0.log" Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.661896 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dflh8_ab16d65b-bdbe-4988-9c13-5e0b91c72217/frr/0.log" Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.869142 5016 generic.go:334] "Generic (PLEG): container finished" podID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" containerID="fa99ed64e49b2a9e4d7a5da6b1f91cd53f6acd182f8c0e43cce185a0bc3a654f" exitCode=0 Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.869184 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjnr8" event={"ID":"92382491-2c6f-40c2-b4a8-1b46fef50dcf","Type":"ContainerDied","Data":"fa99ed64e49b2a9e4d7a5da6b1f91cd53f6acd182f8c0e43cce185a0bc3a654f"} Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.869211 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjnr8" event={"ID":"92382491-2c6f-40c2-b4a8-1b46fef50dcf","Type":"ContainerDied","Data":"a75dfab8dd8c64c5c704a509ea6dc155cda91997f34719d9f5e4ac5bfbd1d5e9"} Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.869222 5016 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a75dfab8dd8c64c5c704a509ea6dc155cda91997f34719d9f5e4ac5bfbd1d5e9" Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.883667 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.944918 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92382491-2c6f-40c2-b4a8-1b46fef50dcf-catalog-content\") pod \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\" (UID: \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\") " Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.945135 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckwkh\" (UniqueName: \"kubernetes.io/projected/92382491-2c6f-40c2-b4a8-1b46fef50dcf-kube-api-access-ckwkh\") pod \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\" (UID: \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\") " Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.945390 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92382491-2c6f-40c2-b4a8-1b46fef50dcf-utilities\") pod \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\" (UID: \"92382491-2c6f-40c2-b4a8-1b46fef50dcf\") " Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.946546 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92382491-2c6f-40c2-b4a8-1b46fef50dcf-utilities" (OuterVolumeSpecName: "utilities") pod "92382491-2c6f-40c2-b4a8-1b46fef50dcf" (UID: "92382491-2c6f-40c2-b4a8-1b46fef50dcf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:49:07 crc kubenswrapper[5016]: I1211 11:49:07.984582 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92382491-2c6f-40c2-b4a8-1b46fef50dcf-kube-api-access-ckwkh" (OuterVolumeSpecName: "kube-api-access-ckwkh") pod "92382491-2c6f-40c2-b4a8-1b46fef50dcf" (UID: "92382491-2c6f-40c2-b4a8-1b46fef50dcf"). InnerVolumeSpecName "kube-api-access-ckwkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:49:08 crc kubenswrapper[5016]: I1211 11:49:08.047412 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92382491-2c6f-40c2-b4a8-1b46fef50dcf-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:49:08 crc kubenswrapper[5016]: I1211 11:49:08.047446 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckwkh\" (UniqueName: \"kubernetes.io/projected/92382491-2c6f-40c2-b4a8-1b46fef50dcf-kube-api-access-ckwkh\") on node \"crc\" DevicePath \"\"" Dec 11 11:49:08 crc kubenswrapper[5016]: I1211 11:49:08.082417 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92382491-2c6f-40c2-b4a8-1b46fef50dcf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92382491-2c6f-40c2-b4a8-1b46fef50dcf" (UID: "92382491-2c6f-40c2-b4a8-1b46fef50dcf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:49:08 crc kubenswrapper[5016]: I1211 11:49:08.123471 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-tdtwb_c3691778-17ce-4c44-b8e1-f9f5a6727778/speaker/0.log" Dec 11 11:49:08 crc kubenswrapper[5016]: I1211 11:49:08.149142 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92382491-2c6f-40c2-b4a8-1b46fef50dcf-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:49:08 crc kubenswrapper[5016]: I1211 11:49:08.878312 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bjnr8" Dec 11 11:49:08 crc kubenswrapper[5016]: I1211 11:49:08.915246 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bjnr8"] Dec 11 11:49:08 crc kubenswrapper[5016]: I1211 11:49:08.928507 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bjnr8"] Dec 11 11:49:09 crc kubenswrapper[5016]: I1211 11:49:09.495073 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" path="/var/lib/kubelet/pods/92382491-2c6f-40c2-b4a8-1b46fef50dcf/volumes" Dec 11 11:49:20 crc kubenswrapper[5016]: I1211 11:49:20.867026 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/util/0.log" Dec 11 11:49:21 crc kubenswrapper[5016]: I1211 11:49:21.427592 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/pull/0.log" Dec 11 11:49:21 crc kubenswrapper[5016]: I1211 11:49:21.434827 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/pull/0.log" Dec 11 11:49:21 crc kubenswrapper[5016]: I1211 11:49:21.611156 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/util/0.log" Dec 11 11:49:21 crc kubenswrapper[5016]: I1211 11:49:21.617198 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/util/0.log" Dec 11 11:49:21 crc kubenswrapper[5016]: I1211 11:49:21.620727 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/pull/0.log" Dec 11 11:49:21 crc kubenswrapper[5016]: I1211 11:49:21.680982 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5b7fccbebf0e22d2dd769066fa7aaa90fd620c5db34f2af6c91e4319d4lb2ld_148dba01-3eb3-49e5-8662-3824d9933a4c/extract/0.log" Dec 11 11:49:21 crc kubenswrapper[5016]: I1211 11:49:21.778212 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/util/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.016360 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/pull/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.025214 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/util/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.031436 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/pull/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.237667 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/util/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.258428 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/extract/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.259383 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98085b0df3808ebec39f9f9529f737144fe2dbcdaa4f334014817c0fa8sk7jc_20233dbc-fd39-4958-bde1-4912a7363bf7/pull/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.408161 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/extract-utilities/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.583796 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/extract-utilities/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.625668 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/extract-content/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.626794 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/extract-content/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.770512 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/extract-utilities/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.792413 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/extract-content/0.log" Dec 11 11:49:22 crc kubenswrapper[5016]: I1211 11:49:22.971363 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/extract-utilities/0.log" Dec 11 11:49:23 crc kubenswrapper[5016]: I1211 11:49:23.126172 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-l6bqr_0a30dded-eec4-4ced-92e2-bdc3209447aa/registry-server/0.log" Dec 11 11:49:23 crc kubenswrapper[5016]: I1211 11:49:23.240974 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/extract-content/0.log" Dec 11 11:49:23 crc kubenswrapper[5016]: I1211 11:49:23.241143 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/extract-content/0.log" Dec 11 11:49:23 crc kubenswrapper[5016]: I1211 11:49:23.286360 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/extract-utilities/0.log" Dec 11 11:49:23 crc kubenswrapper[5016]: I1211 11:49:23.440913 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/extract-utilities/0.log" Dec 11 11:49:23 crc kubenswrapper[5016]: I1211 11:49:23.500068 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/extract-content/0.log" Dec 11 11:49:23 crc kubenswrapper[5016]: I1211 11:49:23.705021 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-fqzqf_60a5c1c0-450b-4360-b6f5-7380a0a2db4f/marketplace-operator/0.log" Dec 11 11:49:23 crc kubenswrapper[5016]: I1211 11:49:23.840167 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/extract-utilities/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.149530 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/extract-utilities/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.171282 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-pkv6p_a515f52f-4817-4d70-8545-ea013bdd98f4/registry-server/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.178392 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/extract-content/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.198733 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/extract-content/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.372200 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/extract-content/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.377601 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/extract-utilities/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.489428 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/extract-utilities/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.576832 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-khfpb_22d65e49-69a1-4e26-bc1c-52bab4fc01ff/registry-server/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.689285 5016 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/extract-utilities/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.701988 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/extract-content/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.709453 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/extract-content/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.889060 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/extract-utilities/0.log" Dec 11 11:49:24 crc kubenswrapper[5016]: I1211 11:49:24.891276 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/extract-content/0.log" Dec 11 11:49:25 crc kubenswrapper[5016]: I1211 11:49:25.421536 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6tbch_801d4e82-9cd5-4795-9363-b4eca6f2189e/registry-server/0.log" Dec 11 11:50:12 crc kubenswrapper[5016]: I1211 11:50:12.933426 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:50:12 crc kubenswrapper[5016]: I1211 11:50:12.934286 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:50:42 crc kubenswrapper[5016]: I1211 11:50:42.933088 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:50:42 crc kubenswrapper[5016]: I1211 11:50:42.934661 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:51:12 crc kubenswrapper[5016]: I1211 11:51:12.933211 5016 patch_prober.go:28] interesting pod/machine-config-daemon-2x7t7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 11 11:51:12 crc kubenswrapper[5016]: I1211 11:51:12.933843 5016 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 11 11:51:12 crc 
kubenswrapper[5016]: I1211 11:51:12.933898 5016 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" Dec 11 11:51:12 crc kubenswrapper[5016]: I1211 11:51:12.934920 5016 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8"} pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 11 11:51:12 crc kubenswrapper[5016]: I1211 11:51:12.935038 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerName="machine-config-daemon" containerID="cri-o://6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" gracePeriod=600 Dec 11 11:51:13 crc kubenswrapper[5016]: E1211 11:51:13.055892 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:51:13 crc kubenswrapper[5016]: I1211 11:51:13.174448 5016 generic.go:334] "Generic (PLEG): container finished" podID="40426f6c-f429-48d5-be22-60e0c4009823" containerID="413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f" exitCode=0 Dec 11 11:51:13 crc kubenswrapper[5016]: I1211 11:51:13.174499 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zbtll/must-gather-56hjr" event={"ID":"40426f6c-f429-48d5-be22-60e0c4009823","Type":"ContainerDied","Data":"413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f"} Dec 11 11:51:13 crc kubenswrapper[5016]: I1211 11:51:13.175411 5016 scope.go:117] "RemoveContainer" containerID="413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f" Dec 11 11:51:13 crc kubenswrapper[5016]: I1211 11:51:13.177515 5016 generic.go:334] "Generic (PLEG): container finished" podID="e679c083-2480-4bc8-a8ea-dc2ff0412508" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" exitCode=0 Dec 11 11:51:13 crc kubenswrapper[5016]: I1211 11:51:13.177576 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" event={"ID":"e679c083-2480-4bc8-a8ea-dc2ff0412508","Type":"ContainerDied","Data":"6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8"} Dec 11 11:51:13 crc kubenswrapper[5016]: I1211 11:51:13.177608 5016 scope.go:117] "RemoveContainer" containerID="1ef2b4beefe05b953228fbd85b170d6d5a71b88779b80bfcc5c1cc7e2f4425ef" Dec 11 11:51:13 crc kubenswrapper[5016]: I1211 11:51:13.178055 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:51:13 crc kubenswrapper[5016]: E1211 11:51:13.178334 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:51:14 crc kubenswrapper[5016]: I1211 11:51:14.063030 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-zbtll_must-gather-56hjr_40426f6c-f429-48d5-be22-60e0c4009823/gather/0.log" Dec 11 11:51:24 crc kubenswrapper[5016]: I1211 11:51:24.417847 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zbtll/must-gather-56hjr"] Dec 11 11:51:24 crc kubenswrapper[5016]: I1211 11:51:24.418583 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-zbtll/must-gather-56hjr" podUID="40426f6c-f429-48d5-be22-60e0c4009823" containerName="copy" containerID="cri-o://d44abe53a9bdbd50dc938dbbd585fd45700625d66d9f31d6e9a7d834c587843e" gracePeriod=2 Dec 11 11:51:24 crc kubenswrapper[5016]: I1211 11:51:24.428585 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zbtll/must-gather-56hjr"] Dec 11 11:51:24 crc kubenswrapper[5016]: I1211 11:51:24.475326 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:51:24 crc kubenswrapper[5016]: E1211 11:51:24.475604 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:51:24 crc kubenswrapper[5016]: I1211 11:51:24.870977 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-zbtll_must-gather-56hjr_40426f6c-f429-48d5-be22-60e0c4009823/copy/0.log" Dec 11 11:51:24 crc kubenswrapper[5016]: I1211 11:51:24.871792 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zbtll/must-gather-56hjr" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.054978 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/40426f6c-f429-48d5-be22-60e0c4009823-must-gather-output\") pod \"40426f6c-f429-48d5-be22-60e0c4009823\" (UID: \"40426f6c-f429-48d5-be22-60e0c4009823\") " Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.055238 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rdd92\" (UniqueName: \"kubernetes.io/projected/40426f6c-f429-48d5-be22-60e0c4009823-kube-api-access-rdd92\") pod \"40426f6c-f429-48d5-be22-60e0c4009823\" (UID: \"40426f6c-f429-48d5-be22-60e0c4009823\") " Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.060879 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40426f6c-f429-48d5-be22-60e0c4009823-kube-api-access-rdd92" (OuterVolumeSpecName: "kube-api-access-rdd92") pod "40426f6c-f429-48d5-be22-60e0c4009823" (UID: "40426f6c-f429-48d5-be22-60e0c4009823"). InnerVolumeSpecName "kube-api-access-rdd92". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.159935 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rdd92\" (UniqueName: \"kubernetes.io/projected/40426f6c-f429-48d5-be22-60e0c4009823-kube-api-access-rdd92\") on node \"crc\" DevicePath \"\"" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.228917 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40426f6c-f429-48d5-be22-60e0c4009823-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "40426f6c-f429-48d5-be22-60e0c4009823" (UID: "40426f6c-f429-48d5-be22-60e0c4009823"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.263548 5016 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/40426f6c-f429-48d5-be22-60e0c4009823-must-gather-output\") on node \"crc\" DevicePath \"\"" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.286435 5016 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-zbtll_must-gather-56hjr_40426f6c-f429-48d5-be22-60e0c4009823/copy/0.log" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.286864 5016 generic.go:334] "Generic (PLEG): container finished" podID="40426f6c-f429-48d5-be22-60e0c4009823" containerID="d44abe53a9bdbd50dc938dbbd585fd45700625d66d9f31d6e9a7d834c587843e" exitCode=143 Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.286922 5016 scope.go:117] "RemoveContainer" containerID="d44abe53a9bdbd50dc938dbbd585fd45700625d66d9f31d6e9a7d834c587843e" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.287115 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zbtll/must-gather-56hjr" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.320751 5016 scope.go:117] "RemoveContainer" containerID="413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.402661 5016 scope.go:117] "RemoveContainer" containerID="d44abe53a9bdbd50dc938dbbd585fd45700625d66d9f31d6e9a7d834c587843e" Dec 11 11:51:25 crc kubenswrapper[5016]: E1211 11:51:25.403175 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d44abe53a9bdbd50dc938dbbd585fd45700625d66d9f31d6e9a7d834c587843e\": container with ID starting with d44abe53a9bdbd50dc938dbbd585fd45700625d66d9f31d6e9a7d834c587843e not found: ID does not exist" containerID="d44abe53a9bdbd50dc938dbbd585fd45700625d66d9f31d6e9a7d834c587843e" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.403229 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d44abe53a9bdbd50dc938dbbd585fd45700625d66d9f31d6e9a7d834c587843e"} err="failed to get container status \"d44abe53a9bdbd50dc938dbbd585fd45700625d66d9f31d6e9a7d834c587843e\": rpc error: code = NotFound desc = could not find container \"d44abe53a9bdbd50dc938dbbd585fd45700625d66d9f31d6e9a7d834c587843e\": container with ID starting with d44abe53a9bdbd50dc938dbbd585fd45700625d66d9f31d6e9a7d834c587843e not found: ID does not exist" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.403264 5016 scope.go:117] "RemoveContainer" containerID="413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f" Dec 11 11:51:25 crc kubenswrapper[5016]: E1211 11:51:25.403817 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f\": container with ID starting with 413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f not found: ID does not exist" containerID="413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.403855 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f"} err="failed to get container status \"413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f\": rpc error: code = NotFound desc = could not find container \"413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f\": container with ID starting with 413960ee314c693260cba997444dadd366cfc15d068e71dfe3ebdfb96942820f not found: ID does not exist" Dec 11 11:51:25 crc kubenswrapper[5016]: I1211 11:51:25.486553 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40426f6c-f429-48d5-be22-60e0c4009823" path="/var/lib/kubelet/pods/40426f6c-f429-48d5-be22-60e0c4009823/volumes" Dec 11 11:51:36 crc kubenswrapper[5016]: I1211 11:51:36.475384 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:51:36 crc kubenswrapper[5016]: E1211 11:51:36.477375 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:51:49 crc kubenswrapper[5016]: I1211 11:51:49.475459 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:51:49 crc kubenswrapper[5016]: E1211 11:51:49.476762 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:52:03 crc kubenswrapper[5016]: I1211 11:52:03.481834 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:52:03 crc kubenswrapper[5016]: E1211 11:52:03.482545 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:52:13 crc kubenswrapper[5016]: I1211 11:52:13.671840 5016 scope.go:117] "RemoveContainer" containerID="4f9a3e39f32faf8b4999b04eac689e6601497dc53506b91d90a7366e6a72c554" Dec 11 11:52:15 crc kubenswrapper[5016]: I1211 11:52:15.475059 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:52:15 crc kubenswrapper[5016]: E1211 11:52:15.475539 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:52:27 crc kubenswrapper[5016]: I1211 11:52:27.475176 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:52:27 crc kubenswrapper[5016]: E1211 11:52:27.475862 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.620163 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vp2l9"] Dec 11 11:52:38 crc kubenswrapper[5016]: E1211 11:52:38.621198 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" containerName="extract-utilities" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.621216 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" containerName="extract-utilities" Dec 
11 11:52:38 crc kubenswrapper[5016]: E1211 11:52:38.621233 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40426f6c-f429-48d5-be22-60e0c4009823" containerName="copy" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.621240 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="40426f6c-f429-48d5-be22-60e0c4009823" containerName="copy" Dec 11 11:52:38 crc kubenswrapper[5016]: E1211 11:52:38.621261 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" containerName="registry-server" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.621268 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" containerName="registry-server" Dec 11 11:52:38 crc kubenswrapper[5016]: E1211 11:52:38.621287 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40426f6c-f429-48d5-be22-60e0c4009823" containerName="gather" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.621294 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="40426f6c-f429-48d5-be22-60e0c4009823" containerName="gather" Dec 11 11:52:38 crc kubenswrapper[5016]: E1211 11:52:38.621326 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" containerName="extract-content" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.621333 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" containerName="extract-content" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.621605 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="92382491-2c6f-40c2-b4a8-1b46fef50dcf" containerName="registry-server" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.621622 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="40426f6c-f429-48d5-be22-60e0c4009823" containerName="gather" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.621637 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="40426f6c-f429-48d5-be22-60e0c4009823" containerName="copy" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.627394 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.648643 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vp2l9"] Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.790580 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hc7p\" (UniqueName: \"kubernetes.io/projected/e7cc2faf-2427-422c-bef9-d98a42541a28-kube-api-access-8hc7p\") pod \"redhat-marketplace-vp2l9\" (UID: \"e7cc2faf-2427-422c-bef9-d98a42541a28\") " pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.790642 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7cc2faf-2427-422c-bef9-d98a42541a28-utilities\") pod \"redhat-marketplace-vp2l9\" (UID: \"e7cc2faf-2427-422c-bef9-d98a42541a28\") " pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.790696 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7cc2faf-2427-422c-bef9-d98a42541a28-catalog-content\") pod \"redhat-marketplace-vp2l9\" (UID: \"e7cc2faf-2427-422c-bef9-d98a42541a28\") " pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.893308 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hc7p\" (UniqueName: \"kubernetes.io/projected/e7cc2faf-2427-422c-bef9-d98a42541a28-kube-api-access-8hc7p\") pod \"redhat-marketplace-vp2l9\" (UID: \"e7cc2faf-2427-422c-bef9-d98a42541a28\") " pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.893368 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7cc2faf-2427-422c-bef9-d98a42541a28-utilities\") pod \"redhat-marketplace-vp2l9\" (UID: \"e7cc2faf-2427-422c-bef9-d98a42541a28\") " pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.893414 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7cc2faf-2427-422c-bef9-d98a42541a28-catalog-content\") pod \"redhat-marketplace-vp2l9\" (UID: \"e7cc2faf-2427-422c-bef9-d98a42541a28\") " pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.893971 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7cc2faf-2427-422c-bef9-d98a42541a28-utilities\") pod \"redhat-marketplace-vp2l9\" (UID: \"e7cc2faf-2427-422c-bef9-d98a42541a28\") " pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.894013 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7cc2faf-2427-422c-bef9-d98a42541a28-catalog-content\") pod \"redhat-marketplace-vp2l9\" (UID: \"e7cc2faf-2427-422c-bef9-d98a42541a28\") " pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.912497 5016 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-8hc7p\" (UniqueName: \"kubernetes.io/projected/e7cc2faf-2427-422c-bef9-d98a42541a28-kube-api-access-8hc7p\") pod \"redhat-marketplace-vp2l9\" (UID: \"e7cc2faf-2427-422c-bef9-d98a42541a28\") " pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:38 crc kubenswrapper[5016]: I1211 11:52:38.988388 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:39 crc kubenswrapper[5016]: I1211 11:52:39.475531 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:52:39 crc kubenswrapper[5016]: E1211 11:52:39.476113 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:52:39 crc kubenswrapper[5016]: I1211 11:52:39.512700 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vp2l9"] Dec 11 11:52:39 crc kubenswrapper[5016]: I1211 11:52:39.770131 5016 generic.go:334] "Generic (PLEG): container finished" podID="e7cc2faf-2427-422c-bef9-d98a42541a28" containerID="c80cdf52db10357294bca2cd668060036c6dfbf588540f9762fcff6200f01ed1" exitCode=0 Dec 11 11:52:39 crc kubenswrapper[5016]: I1211 11:52:39.770189 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp2l9" event={"ID":"e7cc2faf-2427-422c-bef9-d98a42541a28","Type":"ContainerDied","Data":"c80cdf52db10357294bca2cd668060036c6dfbf588540f9762fcff6200f01ed1"} Dec 11 11:52:39 crc kubenswrapper[5016]: I1211 11:52:39.770520 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp2l9" event={"ID":"e7cc2faf-2427-422c-bef9-d98a42541a28","Type":"ContainerStarted","Data":"5fb6c71e09e22a33c142d49af3f2857b2403e45c1571783700a4e12f02d2ae19"} Dec 11 11:52:40 crc kubenswrapper[5016]: I1211 11:52:40.787061 5016 generic.go:334] "Generic (PLEG): container finished" podID="e7cc2faf-2427-422c-bef9-d98a42541a28" containerID="46a72d16f02f4a8278af8331c96a6577951ea32cf804038f1b5df4bfe15b5e34" exitCode=0 Dec 11 11:52:40 crc kubenswrapper[5016]: I1211 11:52:40.787207 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp2l9" event={"ID":"e7cc2faf-2427-422c-bef9-d98a42541a28","Type":"ContainerDied","Data":"46a72d16f02f4a8278af8331c96a6577951ea32cf804038f1b5df4bfe15b5e34"} Dec 11 11:52:41 crc kubenswrapper[5016]: I1211 11:52:41.798177 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp2l9" event={"ID":"e7cc2faf-2427-422c-bef9-d98a42541a28","Type":"ContainerStarted","Data":"bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377"} Dec 11 11:52:41 crc kubenswrapper[5016]: I1211 11:52:41.817403 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vp2l9" podStartSLOduration=2.177409081 podStartE2EDuration="3.817380674s" podCreationTimestamp="2025-12-11 11:52:38 +0000 UTC" firstStartedPulling="2025-12-11 11:52:39.773957873 +0000 UTC m=+4676.592517452" lastFinishedPulling="2025-12-11 
11:52:41.413929466 +0000 UTC m=+4678.232489045" observedRunningTime="2025-12-11 11:52:41.814988575 +0000 UTC m=+4678.633548164" watchObservedRunningTime="2025-12-11 11:52:41.817380674 +0000 UTC m=+4678.635940273" Dec 11 11:52:48 crc kubenswrapper[5016]: I1211 11:52:48.989249 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:48 crc kubenswrapper[5016]: I1211 11:52:48.990260 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:49 crc kubenswrapper[5016]: I1211 11:52:49.047361 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:49 crc kubenswrapper[5016]: I1211 11:52:49.913666 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:49 crc kubenswrapper[5016]: I1211 11:52:49.962415 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vp2l9"] Dec 11 11:52:51 crc kubenswrapper[5016]: I1211 11:52:51.474945 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:52:51 crc kubenswrapper[5016]: E1211 11:52:51.475644 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:52:51 crc kubenswrapper[5016]: I1211 11:52:51.887744 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vp2l9" podUID="e7cc2faf-2427-422c-bef9-d98a42541a28" containerName="registry-server" containerID="cri-o://bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377" gracePeriod=2 Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.414471 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.561629 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7cc2faf-2427-422c-bef9-d98a42541a28-catalog-content\") pod \"e7cc2faf-2427-422c-bef9-d98a42541a28\" (UID: \"e7cc2faf-2427-422c-bef9-d98a42541a28\") " Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.561729 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hc7p\" (UniqueName: \"kubernetes.io/projected/e7cc2faf-2427-422c-bef9-d98a42541a28-kube-api-access-8hc7p\") pod \"e7cc2faf-2427-422c-bef9-d98a42541a28\" (UID: \"e7cc2faf-2427-422c-bef9-d98a42541a28\") " Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.561757 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7cc2faf-2427-422c-bef9-d98a42541a28-utilities\") pod \"e7cc2faf-2427-422c-bef9-d98a42541a28\" (UID: \"e7cc2faf-2427-422c-bef9-d98a42541a28\") " Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.563196 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7cc2faf-2427-422c-bef9-d98a42541a28-utilities" (OuterVolumeSpecName: "utilities") pod "e7cc2faf-2427-422c-bef9-d98a42541a28" (UID: "e7cc2faf-2427-422c-bef9-d98a42541a28"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.571108 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7cc2faf-2427-422c-bef9-d98a42541a28-kube-api-access-8hc7p" (OuterVolumeSpecName: "kube-api-access-8hc7p") pod "e7cc2faf-2427-422c-bef9-d98a42541a28" (UID: "e7cc2faf-2427-422c-bef9-d98a42541a28"). InnerVolumeSpecName "kube-api-access-8hc7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.593252 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7cc2faf-2427-422c-bef9-d98a42541a28-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e7cc2faf-2427-422c-bef9-d98a42541a28" (UID: "e7cc2faf-2427-422c-bef9-d98a42541a28"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.664201 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7cc2faf-2427-422c-bef9-d98a42541a28-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.664244 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hc7p\" (UniqueName: \"kubernetes.io/projected/e7cc2faf-2427-422c-bef9-d98a42541a28-kube-api-access-8hc7p\") on node \"crc\" DevicePath \"\"" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.664254 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7cc2faf-2427-422c-bef9-d98a42541a28-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.898214 5016 generic.go:334] "Generic (PLEG): container finished" podID="e7cc2faf-2427-422c-bef9-d98a42541a28" containerID="bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377" exitCode=0 Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.898253 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp2l9" event={"ID":"e7cc2faf-2427-422c-bef9-d98a42541a28","Type":"ContainerDied","Data":"bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377"} Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.898285 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp2l9" event={"ID":"e7cc2faf-2427-422c-bef9-d98a42541a28","Type":"ContainerDied","Data":"5fb6c71e09e22a33c142d49af3f2857b2403e45c1571783700a4e12f02d2ae19"} Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.898301 5016 scope.go:117] "RemoveContainer" containerID="bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.898328 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vp2l9" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.917801 5016 scope.go:117] "RemoveContainer" containerID="46a72d16f02f4a8278af8331c96a6577951ea32cf804038f1b5df4bfe15b5e34" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.944812 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vp2l9"] Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.948197 5016 scope.go:117] "RemoveContainer" containerID="c80cdf52db10357294bca2cd668060036c6dfbf588540f9762fcff6200f01ed1" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.955568 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vp2l9"] Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.981445 5016 scope.go:117] "RemoveContainer" containerID="bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377" Dec 11 11:52:52 crc kubenswrapper[5016]: E1211 11:52:52.982456 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377\": container with ID starting with bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377 not found: ID does not exist" containerID="bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.982506 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377"} err="failed to get container status \"bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377\": rpc error: code = NotFound desc = could not find container \"bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377\": container with ID starting with bf076ab7aac30156d26af7d4b6f93388974aeee62ae5fd9b12896e4843578377 not found: ID does not exist" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.982540 5016 scope.go:117] "RemoveContainer" containerID="46a72d16f02f4a8278af8331c96a6577951ea32cf804038f1b5df4bfe15b5e34" Dec 11 11:52:52 crc kubenswrapper[5016]: E1211 11:52:52.983025 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46a72d16f02f4a8278af8331c96a6577951ea32cf804038f1b5df4bfe15b5e34\": container with ID starting with 46a72d16f02f4a8278af8331c96a6577951ea32cf804038f1b5df4bfe15b5e34 not found: ID does not exist" containerID="46a72d16f02f4a8278af8331c96a6577951ea32cf804038f1b5df4bfe15b5e34" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.983069 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46a72d16f02f4a8278af8331c96a6577951ea32cf804038f1b5df4bfe15b5e34"} err="failed to get container status \"46a72d16f02f4a8278af8331c96a6577951ea32cf804038f1b5df4bfe15b5e34\": rpc error: code = NotFound desc = could not find container \"46a72d16f02f4a8278af8331c96a6577951ea32cf804038f1b5df4bfe15b5e34\": container with ID starting with 46a72d16f02f4a8278af8331c96a6577951ea32cf804038f1b5df4bfe15b5e34 not found: ID does not exist" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.983096 5016 scope.go:117] "RemoveContainer" containerID="c80cdf52db10357294bca2cd668060036c6dfbf588540f9762fcff6200f01ed1" Dec 11 11:52:52 crc kubenswrapper[5016]: E1211 11:52:52.983539 5016 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c80cdf52db10357294bca2cd668060036c6dfbf588540f9762fcff6200f01ed1\": container with ID starting with c80cdf52db10357294bca2cd668060036c6dfbf588540f9762fcff6200f01ed1 not found: ID does not exist" containerID="c80cdf52db10357294bca2cd668060036c6dfbf588540f9762fcff6200f01ed1" Dec 11 11:52:52 crc kubenswrapper[5016]: I1211 11:52:52.983587 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c80cdf52db10357294bca2cd668060036c6dfbf588540f9762fcff6200f01ed1"} err="failed to get container status \"c80cdf52db10357294bca2cd668060036c6dfbf588540f9762fcff6200f01ed1\": rpc error: code = NotFound desc = could not find container \"c80cdf52db10357294bca2cd668060036c6dfbf588540f9762fcff6200f01ed1\": container with ID starting with c80cdf52db10357294bca2cd668060036c6dfbf588540f9762fcff6200f01ed1 not found: ID does not exist" Dec 11 11:52:53 crc kubenswrapper[5016]: I1211 11:52:53.486020 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7cc2faf-2427-422c-bef9-d98a42541a28" path="/var/lib/kubelet/pods/e7cc2faf-2427-422c-bef9-d98a42541a28/volumes" Dec 11 11:53:04 crc kubenswrapper[5016]: I1211 11:53:04.474474 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:53:04 crc kubenswrapper[5016]: E1211 11:53:04.476206 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:53:13 crc kubenswrapper[5016]: I1211 11:53:13.755388 5016 scope.go:117] "RemoveContainer" containerID="7c80a5dbf7187132bf97bb747f24c71473106c1bcb4f6d344c8759592f262daa" Dec 11 11:53:17 crc kubenswrapper[5016]: I1211 11:53:17.474427 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:53:17 crc kubenswrapper[5016]: E1211 11:53:17.475152 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:53:29 crc kubenswrapper[5016]: I1211 11:53:29.474699 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:53:29 crc kubenswrapper[5016]: E1211 11:53:29.476691 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:53:44 crc kubenswrapper[5016]: I1211 11:53:44.475930 5016 scope.go:117] "RemoveContainer" 
containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:53:44 crc kubenswrapper[5016]: E1211 11:53:44.476618 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.623394 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hblpc"] Dec 11 11:53:50 crc kubenswrapper[5016]: E1211 11:53:50.624736 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7cc2faf-2427-422c-bef9-d98a42541a28" containerName="extract-utilities" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.624757 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7cc2faf-2427-422c-bef9-d98a42541a28" containerName="extract-utilities" Dec 11 11:53:50 crc kubenswrapper[5016]: E1211 11:53:50.624775 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7cc2faf-2427-422c-bef9-d98a42541a28" containerName="registry-server" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.624784 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7cc2faf-2427-422c-bef9-d98a42541a28" containerName="registry-server" Dec 11 11:53:50 crc kubenswrapper[5016]: E1211 11:53:50.624803 5016 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7cc2faf-2427-422c-bef9-d98a42541a28" containerName="extract-content" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.624810 5016 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7cc2faf-2427-422c-bef9-d98a42541a28" containerName="extract-content" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.625110 5016 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7cc2faf-2427-422c-bef9-d98a42541a28" containerName="registry-server" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.626890 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.643386 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hblpc"] Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.784950 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jg2rm\" (UniqueName: \"kubernetes.io/projected/4279b197-af0e-482c-8683-e50f24af4e3c-kube-api-access-jg2rm\") pod \"community-operators-hblpc\" (UID: \"4279b197-af0e-482c-8683-e50f24af4e3c\") " pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.785282 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4279b197-af0e-482c-8683-e50f24af4e3c-utilities\") pod \"community-operators-hblpc\" (UID: \"4279b197-af0e-482c-8683-e50f24af4e3c\") " pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.785362 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4279b197-af0e-482c-8683-e50f24af4e3c-catalog-content\") pod \"community-operators-hblpc\" (UID: \"4279b197-af0e-482c-8683-e50f24af4e3c\") " pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.887722 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4279b197-af0e-482c-8683-e50f24af4e3c-utilities\") pod \"community-operators-hblpc\" (UID: \"4279b197-af0e-482c-8683-e50f24af4e3c\") " pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.887967 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4279b197-af0e-482c-8683-e50f24af4e3c-catalog-content\") pod \"community-operators-hblpc\" (UID: \"4279b197-af0e-482c-8683-e50f24af4e3c\") " pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.888181 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jg2rm\" (UniqueName: \"kubernetes.io/projected/4279b197-af0e-482c-8683-e50f24af4e3c-kube-api-access-jg2rm\") pod \"community-operators-hblpc\" (UID: \"4279b197-af0e-482c-8683-e50f24af4e3c\") " pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.888301 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4279b197-af0e-482c-8683-e50f24af4e3c-utilities\") pod \"community-operators-hblpc\" (UID: \"4279b197-af0e-482c-8683-e50f24af4e3c\") " pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.888383 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4279b197-af0e-482c-8683-e50f24af4e3c-catalog-content\") pod \"community-operators-hblpc\" (UID: \"4279b197-af0e-482c-8683-e50f24af4e3c\") " pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.909089 5016 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jg2rm\" (UniqueName: \"kubernetes.io/projected/4279b197-af0e-482c-8683-e50f24af4e3c-kube-api-access-jg2rm\") pod \"community-operators-hblpc\" (UID: \"4279b197-af0e-482c-8683-e50f24af4e3c\") " pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:53:50 crc kubenswrapper[5016]: I1211 11:53:50.962142 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:53:51 crc kubenswrapper[5016]: I1211 11:53:51.550534 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hblpc"] Dec 11 11:53:52 crc kubenswrapper[5016]: I1211 11:53:52.464439 5016 generic.go:334] "Generic (PLEG): container finished" podID="4279b197-af0e-482c-8683-e50f24af4e3c" containerID="1d292cb925baac9f41c038d95f90c1013a7453e87582d23b1cd4e821f5ae48c2" exitCode=0 Dec 11 11:53:52 crc kubenswrapper[5016]: I1211 11:53:52.464505 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hblpc" event={"ID":"4279b197-af0e-482c-8683-e50f24af4e3c","Type":"ContainerDied","Data":"1d292cb925baac9f41c038d95f90c1013a7453e87582d23b1cd4e821f5ae48c2"} Dec 11 11:53:52 crc kubenswrapper[5016]: I1211 11:53:52.464729 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hblpc" event={"ID":"4279b197-af0e-482c-8683-e50f24af4e3c","Type":"ContainerStarted","Data":"ea15069ecbb5e3dc8f070d980703c9f5b5a838f5e49b6f337c50cfbdc937ee46"} Dec 11 11:53:52 crc kubenswrapper[5016]: I1211 11:53:52.468042 5016 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 11 11:53:54 crc kubenswrapper[5016]: I1211 11:53:54.483484 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hblpc" event={"ID":"4279b197-af0e-482c-8683-e50f24af4e3c","Type":"ContainerStarted","Data":"50a4bdf542f6e3534889c00514b8a9c0d21812c217732cfca2231248625c7329"} Dec 11 11:53:55 crc kubenswrapper[5016]: I1211 11:53:55.495165 5016 generic.go:334] "Generic (PLEG): container finished" podID="4279b197-af0e-482c-8683-e50f24af4e3c" containerID="50a4bdf542f6e3534889c00514b8a9c0d21812c217732cfca2231248625c7329" exitCode=0 Dec 11 11:53:55 crc kubenswrapper[5016]: I1211 11:53:55.495261 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hblpc" event={"ID":"4279b197-af0e-482c-8683-e50f24af4e3c","Type":"ContainerDied","Data":"50a4bdf542f6e3534889c00514b8a9c0d21812c217732cfca2231248625c7329"} Dec 11 11:53:57 crc kubenswrapper[5016]: I1211 11:53:57.474285 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:53:57 crc kubenswrapper[5016]: E1211 11:53:57.474803 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:53:58 crc kubenswrapper[5016]: I1211 11:53:58.522660 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hblpc" 
event={"ID":"4279b197-af0e-482c-8683-e50f24af4e3c","Type":"ContainerStarted","Data":"d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824"} Dec 11 11:53:58 crc kubenswrapper[5016]: I1211 11:53:58.544442 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hblpc" podStartSLOduration=3.687462978 podStartE2EDuration="8.544422751s" podCreationTimestamp="2025-12-11 11:53:50 +0000 UTC" firstStartedPulling="2025-12-11 11:53:52.467694874 +0000 UTC m=+4749.286254473" lastFinishedPulling="2025-12-11 11:53:57.324654667 +0000 UTC m=+4754.143214246" observedRunningTime="2025-12-11 11:53:58.541794348 +0000 UTC m=+4755.360353957" watchObservedRunningTime="2025-12-11 11:53:58.544422751 +0000 UTC m=+4755.362982340" Dec 11 11:54:00 crc kubenswrapper[5016]: I1211 11:54:00.962824 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:54:00 crc kubenswrapper[5016]: I1211 11:54:00.963416 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:54:01 crc kubenswrapper[5016]: I1211 11:54:01.013319 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.181554 5016 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5kmc7"] Dec 11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.186780 5016 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.193984 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5kmc7"] Dec 11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.362082 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88fe4cb8-9c5a-4108-b184-0569b4e3471d-utilities\") pod \"certified-operators-5kmc7\" (UID: \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\") " pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.362238 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88fe4cb8-9c5a-4108-b184-0569b4e3471d-catalog-content\") pod \"certified-operators-5kmc7\" (UID: \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\") " pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.362503 5016 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vw7k\" (UniqueName: \"kubernetes.io/projected/88fe4cb8-9c5a-4108-b184-0569b4e3471d-kube-api-access-5vw7k\") pod \"certified-operators-5kmc7\" (UID: \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\") " pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.465200 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vw7k\" (UniqueName: \"kubernetes.io/projected/88fe4cb8-9c5a-4108-b184-0569b4e3471d-kube-api-access-5vw7k\") pod \"certified-operators-5kmc7\" (UID: \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\") " pod="openshift-marketplace/certified-operators-5kmc7" Dec 
11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.465485 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88fe4cb8-9c5a-4108-b184-0569b4e3471d-utilities\") pod \"certified-operators-5kmc7\" (UID: \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\") " pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.465710 5016 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88fe4cb8-9c5a-4108-b184-0569b4e3471d-catalog-content\") pod \"certified-operators-5kmc7\" (UID: \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\") " pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.466811 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88fe4cb8-9c5a-4108-b184-0569b4e3471d-catalog-content\") pod \"certified-operators-5kmc7\" (UID: \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\") " pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.466874 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88fe4cb8-9c5a-4108-b184-0569b4e3471d-utilities\") pod \"certified-operators-5kmc7\" (UID: \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\") " pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.490725 5016 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vw7k\" (UniqueName: \"kubernetes.io/projected/88fe4cb8-9c5a-4108-b184-0569b4e3471d-kube-api-access-5vw7k\") pod \"certified-operators-5kmc7\" (UID: \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\") " pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:07 crc kubenswrapper[5016]: I1211 11:54:07.524757 5016 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:08 crc kubenswrapper[5016]: I1211 11:54:08.123464 5016 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5kmc7"] Dec 11 11:54:08 crc kubenswrapper[5016]: I1211 11:54:08.618905 5016 generic.go:334] "Generic (PLEG): container finished" podID="88fe4cb8-9c5a-4108-b184-0569b4e3471d" containerID="103b0e1279b9a4d31be8a7aadfe2dcacbb42108408d065d0cf4a1b21220147c0" exitCode=0 Dec 11 11:54:08 crc kubenswrapper[5016]: I1211 11:54:08.619007 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kmc7" event={"ID":"88fe4cb8-9c5a-4108-b184-0569b4e3471d","Type":"ContainerDied","Data":"103b0e1279b9a4d31be8a7aadfe2dcacbb42108408d065d0cf4a1b21220147c0"} Dec 11 11:54:08 crc kubenswrapper[5016]: I1211 11:54:08.619114 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kmc7" event={"ID":"88fe4cb8-9c5a-4108-b184-0569b4e3471d","Type":"ContainerStarted","Data":"2a22571020193cad9545759b717075c62904cdb52a52358c6481f20ef055e8c4"} Dec 11 11:54:10 crc kubenswrapper[5016]: I1211 11:54:10.474497 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:54:10 crc kubenswrapper[5016]: E1211 11:54:10.476883 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:54:11 crc kubenswrapper[5016]: I1211 11:54:11.036387 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:54:11 crc kubenswrapper[5016]: I1211 11:54:11.544045 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hblpc"] Dec 11 11:54:11 crc kubenswrapper[5016]: I1211 11:54:11.652872 5016 generic.go:334] "Generic (PLEG): container finished" podID="88fe4cb8-9c5a-4108-b184-0569b4e3471d" containerID="d698a235afd582844e7c5e09d937e194d269be7775b759d1b64dd1024275b06d" exitCode=0 Dec 11 11:54:11 crc kubenswrapper[5016]: I1211 11:54:11.653099 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hblpc" podUID="4279b197-af0e-482c-8683-e50f24af4e3c" containerName="registry-server" containerID="cri-o://d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824" gracePeriod=2 Dec 11 11:54:11 crc kubenswrapper[5016]: I1211 11:54:11.653384 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kmc7" event={"ID":"88fe4cb8-9c5a-4108-b184-0569b4e3471d","Type":"ContainerDied","Data":"d698a235afd582844e7c5e09d937e194d269be7775b759d1b64dd1024275b06d"} Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.102466 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.299809 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jg2rm\" (UniqueName: \"kubernetes.io/projected/4279b197-af0e-482c-8683-e50f24af4e3c-kube-api-access-jg2rm\") pod \"4279b197-af0e-482c-8683-e50f24af4e3c\" (UID: \"4279b197-af0e-482c-8683-e50f24af4e3c\") " Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.299896 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4279b197-af0e-482c-8683-e50f24af4e3c-utilities\") pod \"4279b197-af0e-482c-8683-e50f24af4e3c\" (UID: \"4279b197-af0e-482c-8683-e50f24af4e3c\") " Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.299966 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4279b197-af0e-482c-8683-e50f24af4e3c-catalog-content\") pod \"4279b197-af0e-482c-8683-e50f24af4e3c\" (UID: \"4279b197-af0e-482c-8683-e50f24af4e3c\") " Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.301371 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4279b197-af0e-482c-8683-e50f24af4e3c-utilities" (OuterVolumeSpecName: "utilities") pod "4279b197-af0e-482c-8683-e50f24af4e3c" (UID: "4279b197-af0e-482c-8683-e50f24af4e3c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.310353 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4279b197-af0e-482c-8683-e50f24af4e3c-kube-api-access-jg2rm" (OuterVolumeSpecName: "kube-api-access-jg2rm") pod "4279b197-af0e-482c-8683-e50f24af4e3c" (UID: "4279b197-af0e-482c-8683-e50f24af4e3c"). InnerVolumeSpecName "kube-api-access-jg2rm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.324048 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jg2rm\" (UniqueName: \"kubernetes.io/projected/4279b197-af0e-482c-8683-e50f24af4e3c-kube-api-access-jg2rm\") on node \"crc\" DevicePath \"\"" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.324093 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4279b197-af0e-482c-8683-e50f24af4e3c-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.378419 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4279b197-af0e-482c-8683-e50f24af4e3c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4279b197-af0e-482c-8683-e50f24af4e3c" (UID: "4279b197-af0e-482c-8683-e50f24af4e3c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.426230 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4279b197-af0e-482c-8683-e50f24af4e3c-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.665595 5016 generic.go:334] "Generic (PLEG): container finished" podID="4279b197-af0e-482c-8683-e50f24af4e3c" containerID="d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824" exitCode=0 Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.666609 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hblpc" event={"ID":"4279b197-af0e-482c-8683-e50f24af4e3c","Type":"ContainerDied","Data":"d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824"} Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.666647 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hblpc" event={"ID":"4279b197-af0e-482c-8683-e50f24af4e3c","Type":"ContainerDied","Data":"ea15069ecbb5e3dc8f070d980703c9f5b5a838f5e49b6f337c50cfbdc937ee46"} Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.666671 5016 scope.go:117] "RemoveContainer" containerID="d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.666866 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hblpc" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.677519 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kmc7" event={"ID":"88fe4cb8-9c5a-4108-b184-0569b4e3471d","Type":"ContainerStarted","Data":"2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc"} Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.704557 5016 scope.go:117] "RemoveContainer" containerID="50a4bdf542f6e3534889c00514b8a9c0d21812c217732cfca2231248625c7329" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.706871 5016 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5kmc7" podStartSLOduration=2.092638586 podStartE2EDuration="5.706833682s" podCreationTimestamp="2025-12-11 11:54:07 +0000 UTC" firstStartedPulling="2025-12-11 11:54:08.623682962 +0000 UTC m=+4765.442242541" lastFinishedPulling="2025-12-11 11:54:12.237878048 +0000 UTC m=+4769.056437637" observedRunningTime="2025-12-11 11:54:12.699267787 +0000 UTC m=+4769.517827366" watchObservedRunningTime="2025-12-11 11:54:12.706833682 +0000 UTC m=+4769.525393271" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.737022 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hblpc"] Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.744244 5016 scope.go:117] "RemoveContainer" containerID="1d292cb925baac9f41c038d95f90c1013a7453e87582d23b1cd4e821f5ae48c2" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.748074 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hblpc"] Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.774743 5016 scope.go:117] "RemoveContainer" containerID="d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824" Dec 11 11:54:12 crc kubenswrapper[5016]: E1211 11:54:12.775355 5016 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824\": container with ID starting with d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824 not found: ID does not exist" containerID="d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.775398 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824"} err="failed to get container status \"d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824\": rpc error: code = NotFound desc = could not find container \"d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824\": container with ID starting with d06260ed1db4885ffc4be8fcc2fa654dd2f4e4e2ccdcdc0c7da8939111462824 not found: ID does not exist" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.775430 5016 scope.go:117] "RemoveContainer" containerID="50a4bdf542f6e3534889c00514b8a9c0d21812c217732cfca2231248625c7329" Dec 11 11:54:12 crc kubenswrapper[5016]: E1211 11:54:12.775698 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50a4bdf542f6e3534889c00514b8a9c0d21812c217732cfca2231248625c7329\": container with ID starting with 50a4bdf542f6e3534889c00514b8a9c0d21812c217732cfca2231248625c7329 not found: ID does not exist" containerID="50a4bdf542f6e3534889c00514b8a9c0d21812c217732cfca2231248625c7329" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.775727 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50a4bdf542f6e3534889c00514b8a9c0d21812c217732cfca2231248625c7329"} err="failed to get container status \"50a4bdf542f6e3534889c00514b8a9c0d21812c217732cfca2231248625c7329\": rpc error: code = NotFound desc = could not find container \"50a4bdf542f6e3534889c00514b8a9c0d21812c217732cfca2231248625c7329\": container with ID starting with 50a4bdf542f6e3534889c00514b8a9c0d21812c217732cfca2231248625c7329 not found: ID does not exist" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.775749 5016 scope.go:117] "RemoveContainer" containerID="1d292cb925baac9f41c038d95f90c1013a7453e87582d23b1cd4e821f5ae48c2" Dec 11 11:54:12 crc kubenswrapper[5016]: E1211 11:54:12.776111 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d292cb925baac9f41c038d95f90c1013a7453e87582d23b1cd4e821f5ae48c2\": container with ID starting with 1d292cb925baac9f41c038d95f90c1013a7453e87582d23b1cd4e821f5ae48c2 not found: ID does not exist" containerID="1d292cb925baac9f41c038d95f90c1013a7453e87582d23b1cd4e821f5ae48c2" Dec 11 11:54:12 crc kubenswrapper[5016]: I1211 11:54:12.776145 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d292cb925baac9f41c038d95f90c1013a7453e87582d23b1cd4e821f5ae48c2"} err="failed to get container status \"1d292cb925baac9f41c038d95f90c1013a7453e87582d23b1cd4e821f5ae48c2\": rpc error: code = NotFound desc = could not find container \"1d292cb925baac9f41c038d95f90c1013a7453e87582d23b1cd4e821f5ae48c2\": container with ID starting with 1d292cb925baac9f41c038d95f90c1013a7453e87582d23b1cd4e821f5ae48c2 not found: ID does not exist" Dec 11 11:54:13 crc kubenswrapper[5016]: I1211 11:54:13.490595 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="4279b197-af0e-482c-8683-e50f24af4e3c" path="/var/lib/kubelet/pods/4279b197-af0e-482c-8683-e50f24af4e3c/volumes" Dec 11 11:54:17 crc kubenswrapper[5016]: I1211 11:54:17.525707 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:17 crc kubenswrapper[5016]: I1211 11:54:17.526554 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:17 crc kubenswrapper[5016]: I1211 11:54:17.613789 5016 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:17 crc kubenswrapper[5016]: I1211 11:54:17.785298 5016 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:17 crc kubenswrapper[5016]: I1211 11:54:17.875988 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5kmc7"] Dec 11 11:54:19 crc kubenswrapper[5016]: I1211 11:54:19.754512 5016 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5kmc7" podUID="88fe4cb8-9c5a-4108-b184-0569b4e3471d" containerName="registry-server" containerID="cri-o://2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc" gracePeriod=2 Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.264575 5016 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.308694 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vw7k\" (UniqueName: \"kubernetes.io/projected/88fe4cb8-9c5a-4108-b184-0569b4e3471d-kube-api-access-5vw7k\") pod \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\" (UID: \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\") " Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.309224 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88fe4cb8-9c5a-4108-b184-0569b4e3471d-catalog-content\") pod \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\" (UID: \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\") " Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.309273 5016 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88fe4cb8-9c5a-4108-b184-0569b4e3471d-utilities\") pod \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\" (UID: \"88fe4cb8-9c5a-4108-b184-0569b4e3471d\") " Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.311846 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88fe4cb8-9c5a-4108-b184-0569b4e3471d-utilities" (OuterVolumeSpecName: "utilities") pod "88fe4cb8-9c5a-4108-b184-0569b4e3471d" (UID: "88fe4cb8-9c5a-4108-b184-0569b4e3471d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.316655 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88fe4cb8-9c5a-4108-b184-0569b4e3471d-kube-api-access-5vw7k" (OuterVolumeSpecName: "kube-api-access-5vw7k") pod "88fe4cb8-9c5a-4108-b184-0569b4e3471d" (UID: "88fe4cb8-9c5a-4108-b184-0569b4e3471d"). InnerVolumeSpecName "kube-api-access-5vw7k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.389927 5016 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88fe4cb8-9c5a-4108-b184-0569b4e3471d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "88fe4cb8-9c5a-4108-b184-0569b4e3471d" (UID: "88fe4cb8-9c5a-4108-b184-0569b4e3471d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.414316 5016 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88fe4cb8-9c5a-4108-b184-0569b4e3471d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.414370 5016 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88fe4cb8-9c5a-4108-b184-0569b4e3471d-utilities\") on node \"crc\" DevicePath \"\"" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.414385 5016 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vw7k\" (UniqueName: \"kubernetes.io/projected/88fe4cb8-9c5a-4108-b184-0569b4e3471d-kube-api-access-5vw7k\") on node \"crc\" DevicePath \"\"" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.770612 5016 generic.go:334] "Generic (PLEG): container finished" podID="88fe4cb8-9c5a-4108-b184-0569b4e3471d" containerID="2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc" exitCode=0 Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.770850 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kmc7" event={"ID":"88fe4cb8-9c5a-4108-b184-0569b4e3471d","Type":"ContainerDied","Data":"2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc"} Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.772494 5016 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5kmc7" event={"ID":"88fe4cb8-9c5a-4108-b184-0569b4e3471d","Type":"ContainerDied","Data":"2a22571020193cad9545759b717075c62904cdb52a52358c6481f20ef055e8c4"} Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.771005 5016 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5kmc7" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.772618 5016 scope.go:117] "RemoveContainer" containerID="2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.803100 5016 scope.go:117] "RemoveContainer" containerID="d698a235afd582844e7c5e09d937e194d269be7775b759d1b64dd1024275b06d" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.827813 5016 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5kmc7"] Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.836744 5016 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5kmc7"] Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.845922 5016 scope.go:117] "RemoveContainer" containerID="103b0e1279b9a4d31be8a7aadfe2dcacbb42108408d065d0cf4a1b21220147c0" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.886492 5016 scope.go:117] "RemoveContainer" containerID="2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc" Dec 11 11:54:20 crc kubenswrapper[5016]: E1211 11:54:20.887210 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc\": container with ID starting with 2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc not found: ID does not exist" containerID="2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.887253 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc"} err="failed to get container status \"2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc\": rpc error: code = NotFound desc = could not find container \"2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc\": container with ID starting with 2a0ddca22aac74f35a4a2d6e33a76d8e0a08492eeb81257ffd9b2dd4c77ef8fc not found: ID does not exist" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.887280 5016 scope.go:117] "RemoveContainer" containerID="d698a235afd582844e7c5e09d937e194d269be7775b759d1b64dd1024275b06d" Dec 11 11:54:20 crc kubenswrapper[5016]: E1211 11:54:20.887586 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d698a235afd582844e7c5e09d937e194d269be7775b759d1b64dd1024275b06d\": container with ID starting with d698a235afd582844e7c5e09d937e194d269be7775b759d1b64dd1024275b06d not found: ID does not exist" containerID="d698a235afd582844e7c5e09d937e194d269be7775b759d1b64dd1024275b06d" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.887631 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d698a235afd582844e7c5e09d937e194d269be7775b759d1b64dd1024275b06d"} err="failed to get container status \"d698a235afd582844e7c5e09d937e194d269be7775b759d1b64dd1024275b06d\": rpc error: code = NotFound desc = could not find container \"d698a235afd582844e7c5e09d937e194d269be7775b759d1b64dd1024275b06d\": container with ID starting with d698a235afd582844e7c5e09d937e194d269be7775b759d1b64dd1024275b06d not found: ID does not exist" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.887659 5016 scope.go:117] "RemoveContainer" 
containerID="103b0e1279b9a4d31be8a7aadfe2dcacbb42108408d065d0cf4a1b21220147c0" Dec 11 11:54:20 crc kubenswrapper[5016]: E1211 11:54:20.887962 5016 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"103b0e1279b9a4d31be8a7aadfe2dcacbb42108408d065d0cf4a1b21220147c0\": container with ID starting with 103b0e1279b9a4d31be8a7aadfe2dcacbb42108408d065d0cf4a1b21220147c0 not found: ID does not exist" containerID="103b0e1279b9a4d31be8a7aadfe2dcacbb42108408d065d0cf4a1b21220147c0" Dec 11 11:54:20 crc kubenswrapper[5016]: I1211 11:54:20.887988 5016 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"103b0e1279b9a4d31be8a7aadfe2dcacbb42108408d065d0cf4a1b21220147c0"} err="failed to get container status \"103b0e1279b9a4d31be8a7aadfe2dcacbb42108408d065d0cf4a1b21220147c0\": rpc error: code = NotFound desc = could not find container \"103b0e1279b9a4d31be8a7aadfe2dcacbb42108408d065d0cf4a1b21220147c0\": container with ID starting with 103b0e1279b9a4d31be8a7aadfe2dcacbb42108408d065d0cf4a1b21220147c0 not found: ID does not exist" Dec 11 11:54:21 crc kubenswrapper[5016]: I1211 11:54:21.476533 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:54:21 crc kubenswrapper[5016]: E1211 11:54:21.477248 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:54:21 crc kubenswrapper[5016]: I1211 11:54:21.500719 5016 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88fe4cb8-9c5a-4108-b184-0569b4e3471d" path="/var/lib/kubelet/pods/88fe4cb8-9c5a-4108-b184-0569b4e3471d/volumes" Dec 11 11:54:35 crc kubenswrapper[5016]: I1211 11:54:35.475584 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:54:35 crc kubenswrapper[5016]: E1211 11:54:35.476369 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:54:47 crc kubenswrapper[5016]: I1211 11:54:47.474731 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:54:47 crc kubenswrapper[5016]: E1211 11:54:47.475879 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:55:02 crc kubenswrapper[5016]: I1211 11:55:02.475151 5016 scope.go:117] "RemoveContainer" 
containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:55:02 crc kubenswrapper[5016]: E1211 11:55:02.476473 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:55:13 crc kubenswrapper[5016]: I1211 11:55:13.912269 5016 scope.go:117] "RemoveContainer" containerID="1a91e41cb59b0c1801db987003aaac8ea505b195b592c118690674bbbb62706a" Dec 11 11:55:13 crc kubenswrapper[5016]: I1211 11:55:13.937430 5016 scope.go:117] "RemoveContainer" containerID="fa99ed64e49b2a9e4d7a5da6b1f91cd53f6acd182f8c0e43cce185a0bc3a654f" Dec 11 11:55:13 crc kubenswrapper[5016]: I1211 11:55:13.980769 5016 scope.go:117] "RemoveContainer" containerID="03a038d644dba62294971f6d09c02a954123a99105e97fa334b98d5cc20c193a" Dec 11 11:55:14 crc kubenswrapper[5016]: I1211 11:55:14.474380 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:55:14 crc kubenswrapper[5016]: E1211 11:55:14.474911 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" Dec 11 11:55:28 crc kubenswrapper[5016]: I1211 11:55:28.474831 5016 scope.go:117] "RemoveContainer" containerID="6636ecc26dbf0d5360b8af5a64285fa8d9a38d53f03444050a11c3f269f558c8" Dec 11 11:55:28 crc kubenswrapper[5016]: E1211 11:55:28.476693 5016 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2x7t7_openshift-machine-config-operator(e679c083-2480-4bc8-a8ea-dc2ff0412508)\"" pod="openshift-machine-config-operator/machine-config-daemon-2x7t7" podUID="e679c083-2480-4bc8-a8ea-dc2ff0412508" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515116530502024443 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015116530502017360 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015116516570016514 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015116516570015464 5ustar corecore